// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "ic-inl.h"
#include "codegen.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)


static void ProbeTable(Isolate* isolate,
                       MacroAssembler* masm,
                       Code::Flags flags,
                       StubCache::Table table,
                       Register receiver,
                       Register name,
                       // Number of the cache entry, not scaled.
                       Register offset,
                       Register scratch,
                       Register scratch2,
                       Register offset_scratch) {
  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));

  uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
  uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
  uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());

  // Check the relative positions of the address fields.
  ASSERT(value_off_addr > key_off_addr);
  ASSERT((value_off_addr - key_off_addr) % 4 == 0);
  ASSERT((value_off_addr - key_off_addr) < (256 * 4));
  ASSERT(map_off_addr > key_off_addr);
  ASSERT((map_off_addr - key_off_addr) % 4 == 0);
  ASSERT((map_off_addr - key_off_addr) < (256 * 4));

  Label miss;
  Register base_addr = scratch;
  scratch = no_reg;

  // Multiply by 3 because there are 3 fields per entry (name, code, map).
  __ add(offset_scratch, offset, Operand(offset, LSL, 1));

  // Calculate the base address of the entry.
  __ mov(base_addr, Operand(key_offset));
  __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2));

  // Check that the key in the entry matches the name.
  __ ldr(ip, MemOperand(base_addr, 0));
  __ cmp(name, ip);
  __ b(ne, &miss);

  // Check the map matches.
  __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
  __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ cmp(ip, scratch2);
  __ b(ne, &miss);

  // Get the code entry from the cache.
  Register code = scratch2;
  scratch2 = no_reg;
  __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));

  // Check that the flags match what we're looking for.
  Register flags_reg = base_addr;
  base_addr = no_reg;
  __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
  // It's a nice optimization if this constant is encodable in the bic insn.
  uint32_t mask = Code::kFlagsNotUsedInLookup;
  ASSERT(__ ImmediateFitsAddrMode1Instruction(mask));
  __ bic(flags_reg, flags_reg, Operand(mask));
  // Using cmn and the negative instead of cmp means we can use movw.
  if (flags < 0) {
    __ cmn(flags_reg, Operand(-flags));
  } else {
    __ cmp(flags_reg, Operand(flags));
  }
  __ b(ne, &miss);

#ifdef DEBUG
  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
    __ jmp(&miss);
  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
    __ jmp(&miss);
  }
#endif

  // Jump to the first instruction in the code stub.
  __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));

  // Miss: fall through.
  __ bind(&miss);
}


// Helper function used to check that the dictionary doesn't contain
// the property. This function may return false negatives, so miss_label
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
// Name must be a symbol and receiver must be a heap object.
static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
                                             Label* miss_label,
                                             Register receiver,
                                             Handle<String> name,
                                             Register scratch0,
                                             Register scratch1) {
  ASSERT(name->IsSymbol());
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
  __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);

  Label done;

  const int kInterceptorOrAccessCheckNeededMask =
      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);

  // Bail out if the receiver has a named interceptor or requires access checks.
  Register map = scratch1;
  __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
  __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
  __ b(ne, miss_label);

  // Check that receiver is a JSObject.
  __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
  __ b(lt, miss_label);

  // Load properties array.
  Register properties = scratch0;
  __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  // Check that the properties array is a dictionary.
  __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
  Register tmp = properties;
  __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
  __ cmp(map, tmp);
  __ b(ne, miss_label);

  // Restore the temporarily used register.
  __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  StringDictionaryLookupStub::GenerateNegativeLookup(masm,
                                                     miss_label,
                                                     &done,
                                                     receiver,
                                                     properties,
                                                     name,
                                                     scratch1);
  __ bind(&done);
  __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}


void StubCache::GenerateProbe(MacroAssembler* masm,
                              Code::Flags flags,
                              Register receiver,
                              Register name,
                              Register scratch,
                              Register extra,
                              Register extra2,
                              Register extra3) {
  Isolate* isolate = masm->isolate();
  Label miss;

  // Make sure that code is valid. The multiplying code relies on the
  // entry size being 12.
  ASSERT(sizeof(Entry) == 12);

  // Make sure the flags do not name a specific type.
  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);

  // Make sure that there are no register conflicts.
  ASSERT(!scratch.is(receiver));
  ASSERT(!scratch.is(name));
  ASSERT(!extra.is(receiver));
  ASSERT(!extra.is(name));
  ASSERT(!extra.is(scratch));
  ASSERT(!extra2.is(receiver));
  ASSERT(!extra2.is(name));
  ASSERT(!extra2.is(scratch));
  ASSERT(!extra2.is(extra));

  // Check scratch, extra and extra2 registers are valid.
  ASSERT(!scratch.is(no_reg));
  ASSERT(!extra.is(no_reg));
  ASSERT(!extra2.is(no_reg));
  ASSERT(!extra3.is(no_reg));

  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
                      extra2, extra3);

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &miss);

  // Get the map of the receiver and compute the hash.
  __ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset));
  __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ add(scratch, scratch, Operand(ip));
  uint32_t mask = kPrimaryTableSize - 1;
  // We shift out the last two bits because they are not part of the hash and
  // they are always 01 for maps.
  __ mov(scratch, Operand(scratch, LSR, kHeapObjectTagSize));
  // Mask down the eor argument to the minimum to keep the immediate
  // ARM-encodable.
  __ eor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
  // Prefer and_ to ubfx here because ubfx takes 2 cycles.
  __ and_(scratch, scratch, Operand(mask));
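  // scratch now holds the primary table index:
  //   (((name.hash_field + receiver.map) >> kHeapObjectTagSize)
  //    ^ (flags >> kHeapObjectTagSize)) & (kPrimaryTableSize - 1).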

  // Probe the primary table.
  ProbeTable(isolate,
             masm,
             flags,
             kPrimary,
             receiver,
             name,
             scratch,
             extra,
             extra2,
             extra3);

  // Primary miss: Compute hash for secondary probe.
  __ sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
  uint32_t mask2 = kSecondaryTableSize - 1;
  __ add(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
  __ and_(scratch, scratch, Operand(mask2));
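  // scratch now holds the secondary table index:
  //   (primary_index - (name >> kHeapObjectTagSize)
  //    + (flags >> kHeapObjectTagSize)) & (kSecondaryTableSize - 1).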

  // Probe the secondary table.
  ProbeTable(isolate,
             masm,
             flags,
             kSecondary,
             receiver,
             name,
             scratch,
             extra,
             extra2,
             extra3);

  // Cache miss: Fall-through and let caller handle the miss by
  // entering the runtime system.
  __ bind(&miss);
  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
                      extra2, extra3);
}


void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
                                                       int index,
                                                       Register prototype) {
  // Load the global or builtins object from the current context.
  __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  // Load the global context from the global or builtins object.
  __ ldr(prototype,
         FieldMemOperand(prototype, GlobalObject::kGlobalContextOffset));
  // Load the function from the global context.
  __ ldr(prototype, MemOperand(prototype, Context::SlotOffset(index)));
  // Load the initial map. The global functions all have initial maps.
  __ ldr(prototype,
         FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
  // Load the prototype from the initial map.
  __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
}


void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
    MacroAssembler* masm,
    int index,
    Register prototype,
    Label* miss) {
  Isolate* isolate = masm->isolate();
  // Check we're still in the same context.
  __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ Move(ip, isolate->global());
  __ cmp(prototype, ip);
  __ b(ne, miss);
  // Get the global function with the given index.
  Handle<JSFunction> function(
      JSFunction::cast(isolate->global_context()->get(index)));
  // Load its initial map. The global functions all have initial maps.
  __ Move(prototype, Handle<Map>(function->initial_map()));
  // Load the prototype from the initial map.
  __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
}


// Load a fast property out of a holder object (src). In-object properties
// are loaded directly otherwise the property is loaded from the properties
// fixed array.
void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
                                            Register dst,
                                            Register src,
                                            Handle<JSObject> holder,
                                            int index) {
  // Adjust for the number of properties stored in the holder.
  index -= holder->map()->inobject_properties();
  if (index < 0) {
    // Get the property straight out of the holder.
    int offset = holder->map()->instance_size() + (index * kPointerSize);
    __ ldr(dst, FieldMemOperand(src, offset));
  } else {
    // Calculate the offset into the properties array.
    int offset = index * kPointerSize + FixedArray::kHeaderSize;
    __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
    __ ldr(dst, FieldMemOperand(dst, offset));
  }
}


void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
                                           Register receiver,
                                           Register scratch,
                                           Label* miss_label) {
  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss_label);

  // Check that the object is a JS array.
  __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
  __ b(ne, miss_label);

  // Load length directly from the JS array.
  __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Ret();
}


// Generate code to check if an object is a string. If the object is a
// heap object, its map's instance type is left in the scratch1 register.
// If this is not needed, scratch1 and scratch2 may be the same register.
static void GenerateStringCheck(MacroAssembler* masm,
                                Register receiver,
                                Register scratch1,
                                Register scratch2,
                                Label* smi,
                                Label* non_string_object) {
  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, smi);

  // Check that the object is a string.
  __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  __ and_(scratch2, scratch1, Operand(kIsNotStringMask));
  // The cast is to resolve the overload for the argument of 0x0.
  __ cmp(scratch2, Operand(static_cast<int32_t>(kStringTag)));
  __ b(ne, non_string_object);
}


// Generate code to load the length from a string object and return the length.
// If the receiver object is not a string or a wrapped string object the
// execution continues at the miss label. The register containing the
// receiver is potentially clobbered.
void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
                                            Register receiver,
                                            Register scratch1,
                                            Register scratch2,
                                            Label* miss,
                                            bool support_wrappers) {
  Label check_wrapper;

  // Check if the object is a string leaving the instance type in the
  // scratch1 register.
  GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
                      support_wrappers ? &check_wrapper : miss);

  // Load length directly from the string.
  __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
  __ Ret();

  if (support_wrappers) {
    // Check if the object is a JSValue wrapper.
    __ bind(&check_wrapper);
    __ cmp(scratch1, Operand(JS_VALUE_TYPE));
    __ b(ne, miss);

    // Unwrap the value and check if the wrapped value is a string.
    __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
    GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
    __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
    __ Ret();
  }
}


void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
                                                 Register receiver,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* miss_label) {
  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
  __ mov(r0, scratch1);
  __ Ret();
}


// Generate StoreField code, value is passed in r0 register.
// When leaving generated code after success, the receiver_reg and name_reg
// may be clobbered. Upon branch to miss_label, the receiver and name
// registers have their original values.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
                                      Handle<JSObject> object,
                                      int index,
                                      Handle<Map> transition,
                                      Register receiver_reg,
                                      Register name_reg,
                                      Register scratch,
                                      Label* miss_label) {
  // r0 : value
  Label exit;

  // Check that the map of the object hasn't changed.
  CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
                                             : REQUIRE_EXACT_MAP;
  __ CheckMap(receiver_reg, scratch, Handle<Map>(object->map()), miss_label,
              DO_SMI_CHECK, mode);

  // Perform global security token check if needed.
  if (object->IsJSGlobalProxy()) {
    __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
  }

  // Stub never generated for non-global objects that require access
  // checks.
  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());

  // Perform map transition for the receiver if necessary.
  if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
    // The properties must be extended before we can store the value.
    // We jump to a runtime call that extends the properties array.
    __ push(receiver_reg);
    __ mov(r2, Operand(transition));
    __ Push(r2, r0);
    __ TailCallExternalReference(
        ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
                          masm->isolate()),
        3,
        1);
    return;
  }

  if (!transition.is_null()) {
    // Update the map of the object; no write barrier updating is
    // needed because the map is never in new space.
    __ mov(ip, Operand(transition));
    __ str(ip, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
  }

  // Adjust for the number of properties stored in the object. Even in the
  // face of a transition we can use the old map here because the size of the
  // object and the number of in-object properties is not going to change.
  index -= object->map()->inobject_properties();

  if (index < 0) {
    // Set the property straight into the object.
    int offset = object->map()->instance_size() + (index * kPointerSize);
    __ str(r0, FieldMemOperand(receiver_reg, offset));

    // Skip updating write barrier if storing a smi.
    __ JumpIfSmi(r0, &exit);

    // Update the write barrier for the array address.
    // Pass the now unused name_reg as a scratch register.
    __ mov(name_reg, r0);
    __ RecordWriteField(receiver_reg,
                        offset,
                        name_reg,
                        scratch,
                        kLRHasNotBeenSaved,
                        kDontSaveFPRegs);
  } else {
    // Write to the properties array.
    int offset = index * kPointerSize + FixedArray::kHeaderSize;
    // Get the properties array.
    __ ldr(scratch, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
    __ str(r0, FieldMemOperand(scratch, offset));

    // Skip updating write barrier if storing a smi.
    __ JumpIfSmi(r0, &exit);

    // Update the write barrier for the array address.
    // Ok to clobber receiver_reg and name_reg, since we return.
    __ mov(name_reg, r0);
    __ RecordWriteField(scratch,
                        offset,
                        name_reg,
                        receiver_reg,
                        kLRHasNotBeenSaved,
                        kDontSaveFPRegs);
  }

  // Return the value (register r0).
  __ bind(&exit);
  __ Ret();
}


void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
  ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
  Handle<Code> code = (kind == Code::LOAD_IC)
      ? masm->isolate()->builtins()->LoadIC_Miss()
      : masm->isolate()->builtins()->KeyedLoadIC_Miss();
  __ Jump(code, RelocInfo::CODE_TARGET);
}


static void GenerateCallFunction(MacroAssembler* masm,
                                 Handle<Object> object,
                                 const ParameterCount& arguments,
                                 Label* miss,
                                 Code::ExtraICState extra_ic_state) {
  // ----------- S t a t e -------------
  //  -- r0: receiver
  //  -- r1: function to call
  // -----------------------------------

  // Check that the function really is a function.
  __ JumpIfSmi(r1, miss);
  __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
  __ b(ne, miss);

  // Patch the receiver on the stack with the global proxy if
  // necessary.
  if (object->IsGlobalObject()) {
    __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
    __ str(r3, MemOperand(sp, arguments.immediate() * kPointerSize));
  }

  // Invoke the function.
  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
      ? CALL_AS_FUNCTION
      : CALL_AS_METHOD;
  __ InvokeFunction(r1, arguments, JUMP_FUNCTION, NullCallWrapper(), call_kind);
}


static void PushInterceptorArguments(MacroAssembler* masm,
                                     Register receiver,
                                     Register holder,
                                     Register name,
                                     Handle<JSObject> holder_obj) {
  __ push(name);
  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
  Register scratch = name;
  __ mov(scratch, Operand(interceptor));
  __ push(scratch);
  __ push(receiver);
  __ push(holder);
  __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
  __ push(scratch);
}


static void CompileCallLoadPropertyWithInterceptor(
    MacroAssembler* masm,
    Register receiver,
    Register holder,
    Register name,
    Handle<JSObject> holder_obj) {
  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);

  ExternalReference ref =
      ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
                        masm->isolate());
  __ mov(r0, Operand(5));
  __ mov(r1, Operand(ref));

  CEntryStub stub(1);
  __ CallStub(&stub);
}


static const int kFastApiCallArguments = 3;
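// The three reserved slots hold the holder object, the callee JS function and
// the call data (see the stack layout in GenerateFastApiDirectCall below).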

// Reserves space for the extra arguments to FastHandleApiCall in the
// caller's frame.
//
// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
                                       Register scratch) {
  __ mov(scratch, Operand(Smi::FromInt(0)));
  for (int i = 0; i < kFastApiCallArguments; i++) {
    __ push(scratch);
  }
}


// Undoes the effects of ReserveSpaceForFastApiCall.
static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
  __ Drop(kFastApiCallArguments);
}


static void GenerateFastApiDirectCall(MacroAssembler* masm,
                                      const CallOptimization& optimization,
                                      int argc) {
  // ----------- S t a t e -------------
  //  -- sp[0]              : holder (set by CheckPrototypes)
  //  -- sp[4]              : callee JS function
  //  -- sp[8]              : call data
  //  -- sp[12]             : last JS argument
  //  -- ...
  //  -- sp[(argc + 3) * 4] : first JS argument
  //  -- sp[(argc + 4) * 4] : receiver
  // -----------------------------------
  // Get the function and setup the context.
  Handle<JSFunction> function = optimization.constant_function();
  __ LoadHeapObject(r5, function);
  __ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));

  // Pass the additional arguments FastHandleApiCall expects.
  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
  Handle<Object> call_data(api_call_info->data());
  if (masm->isolate()->heap()->InNewSpace(*call_data)) {
    __ Move(r0, api_call_info);
    __ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
  } else {
    __ Move(r6, call_data);
  }
  // Store JS function and call data.
  __ stm(ib, sp, r5.bit() | r6.bit());
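  // stm with 'ib' addressing stores the lowest-numbered register at the
  // lowest address: r5 (function) at sp + 4 and r6 (call data) at sp + 8,
  // filling the slots shown in the layout above.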

  // r2 points to call data as expected by Arguments
  // (refer to layout above).
  __ add(r2, sp, Operand(2 * kPointerSize));

  const int kApiStackSpace = 4;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  // r0 = v8::Arguments&
  // Arguments is after the return address.
  __ add(r0, sp, Operand(1 * kPointerSize));
  // v8::Arguments::implicit_args = data
  __ str(r2, MemOperand(r0, 0 * kPointerSize));
  // v8::Arguments::values = last argument
  __ add(ip, r2, Operand(argc * kPointerSize));
  __ str(ip, MemOperand(r0, 1 * kPointerSize));
  // v8::Arguments::length_ = argc
  __ mov(ip, Operand(argc));
  __ str(ip, MemOperand(r0, 2 * kPointerSize));
  // v8::Arguments::is_construct_call = 0
  __ mov(ip, Operand(0));
  __ str(ip, MemOperand(r0, 3 * kPointerSize));

  const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
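  // Unwind the argc JS arguments, the kFastApiCallArguments extra slots and
  // the receiver when the API call returns.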
  Address function_address = v8::ToCData<Address>(api_call_info->callback());
  ApiFunction fun(function_address);
  ExternalReference ref = ExternalReference(&fun,
                                            ExternalReference::DIRECT_API_CALL,
                                            masm->isolate());
  AllowExternalCallThatCantCauseGC scope(masm);

  __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
}


class CallInterceptorCompiler BASE_EMBEDDED {
 public:
  CallInterceptorCompiler(StubCompiler* stub_compiler,
                          const ParameterCount& arguments,
                          Register name,
                          Code::ExtraICState extra_ic_state)
      : stub_compiler_(stub_compiler),
        arguments_(arguments),
        name_(name),
        extra_ic_state_(extra_ic_state) {}

  void Compile(MacroAssembler* masm,
               Handle<JSObject> object,
               Handle<JSObject> holder,
               Handle<String> name,
               LookupResult* lookup,
               Register receiver,
               Register scratch1,
               Register scratch2,
               Register scratch3,
               Label* miss) {
    ASSERT(holder->HasNamedInterceptor());
    ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());

    // Check that the receiver isn't a smi.
    __ JumpIfSmi(receiver, miss);
    CallOptimization optimization(lookup);
    if (optimization.is_constant_call()) {
      CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
                       holder, lookup, name, optimization, miss);
    } else {
      CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
                     name, holder, miss);
    }
  }

 private:
  void CompileCacheable(MacroAssembler* masm,
                        Handle<JSObject> object,
                        Register receiver,
                        Register scratch1,
                        Register scratch2,
                        Register scratch3,
                        Handle<JSObject> interceptor_holder,
                        LookupResult* lookup,
                        Handle<String> name,
                        const CallOptimization& optimization,
                        Label* miss_label) {
    ASSERT(optimization.is_constant_call());
    ASSERT(!lookup->holder()->IsGlobalObject());
    Counters* counters = masm->isolate()->counters();
    int depth1 = kInvalidProtoDepth;
    int depth2 = kInvalidProtoDepth;
    bool can_do_fast_api_call = false;
    if (optimization.is_simple_api_call() &&
        !lookup->holder()->IsGlobalObject()) {
      depth1 = optimization.GetPrototypeDepthOfExpectedType(
          object, interceptor_holder);
      if (depth1 == kInvalidProtoDepth) {
        depth2 = optimization.GetPrototypeDepthOfExpectedType(
            interceptor_holder, Handle<JSObject>(lookup->holder()));
      }
      can_do_fast_api_call =
          depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
    }

    __ IncrementCounter(counters->call_const_interceptor(), 1,
                        scratch1, scratch2);

    if (can_do_fast_api_call) {
      __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
                          scratch1, scratch2);
      ReserveSpaceForFastApiCall(masm, scratch1);
    }

    // Check that the maps from receiver to interceptor's holder
    // haven't changed and thus we can invoke interceptor.
    Label miss_cleanup;
    Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
    Register holder =
        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
                                        scratch1, scratch2, scratch3,
                                        name, depth1, miss);

    // Invoke an interceptor and if it provides a value,
    // branch to |regular_invoke|.
    Label regular_invoke;
    LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
                        &regular_invoke);

    // Interceptor returned nothing for this property. Try to use cached
    // constant function.

    // Check that the maps from interceptor's holder to constant function's
    // holder haven't changed and thus we can use cached constant function.
    if (*interceptor_holder != lookup->holder()) {
      stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
                                      Handle<JSObject>(lookup->holder()),
                                      scratch1, scratch2, scratch3,
                                      name, depth2, miss);
    } else {
      // CheckPrototypes has a side effect of fetching a 'holder'
      // for API (object which is instanceof for the signature). It's
      // safe to omit it here, as if present, it should be fetched
      // by the previous CheckPrototypes.
      ASSERT(depth2 == kInvalidProtoDepth);
    }

    // Invoke function.
    if (can_do_fast_api_call) {
      GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
    } else {
      CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
          ? CALL_AS_FUNCTION
          : CALL_AS_METHOD;
      __ InvokeFunction(optimization.constant_function(), arguments_,
                        JUMP_FUNCTION, NullCallWrapper(), call_kind);
    }

    // Deferred code for fast API call case---clean preallocated space.
    if (can_do_fast_api_call) {
      __ bind(&miss_cleanup);
      FreeSpaceForFastApiCall(masm);
      __ b(miss_label);
    }

    // Invoke a regular function.
    __ bind(&regular_invoke);
    if (can_do_fast_api_call) {
      FreeSpaceForFastApiCall(masm);
    }
  }

  void CompileRegular(MacroAssembler* masm,
                      Handle<JSObject> object,
                      Register receiver,
                      Register scratch1,
                      Register scratch2,
                      Register scratch3,
                      Handle<String> name,
                      Handle<JSObject> interceptor_holder,
                      Label* miss_label) {
    Register holder =
        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
                                        scratch1, scratch2, scratch3,
                                        name, miss_label);

    // Call a runtime function to load the interceptor property.
    FrameScope scope(masm, StackFrame::INTERNAL);
    // Save the name_ register across the call.
    __ push(name_);
    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
    __ CallExternalReference(
        ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
                          masm->isolate()),
        5);
    // Restore the name_ register.
    __ pop(name_);
    // Leave the internal frame.
  }

  void LoadWithInterceptor(MacroAssembler* masm,
                           Register receiver,
                           Register holder,
                           Handle<JSObject> holder_obj,
                           Register scratch,
                           Label* interceptor_succeeded) {
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ Push(holder, name_);
      CompileCallLoadPropertyWithInterceptor(masm,
                                             receiver,
                                             holder,
                                             name_,
                                             holder_obj);
      __ pop(name_);  // Restore the name.
      __ pop(receiver);  // Restore the holder.
    }
    // If interceptor returns no-result sentinel, call the constant function.
    __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
    __ cmp(r0, scratch);
    __ b(ne, interceptor_succeeded);
  }

  StubCompiler* stub_compiler_;
  const ParameterCount& arguments_;
  Register name_;
  Code::ExtraICState extra_ic_state_;
};


// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
static void GenerateCheckPropertyCell(MacroAssembler* masm,
                                      Handle<GlobalObject> global,
                                      Handle<String> name,
                                      Register scratch,
                                      Label* miss) {
  Handle<JSGlobalPropertyCell> cell =
      GlobalObject::EnsurePropertyCell(global, name);
  ASSERT(cell->value()->IsTheHole());
  __ mov(scratch, Operand(cell));
  __ ldr(scratch,
         FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ cmp(scratch, ip);
  __ b(ne, miss);
}


// Calls GenerateCheckPropertyCell for each global object in the prototype
// chain from object to (but not including) holder.
static void GenerateCheckPropertyCells(MacroAssembler* masm,
                                       Handle<JSObject> object,
                                       Handle<JSObject> holder,
                                       Handle<String> name,
                                       Register scratch,
                                       Label* miss) {
  Handle<JSObject> current = object;
  while (!current.is_identical_to(holder)) {
    if (current->IsGlobalObject()) {
      GenerateCheckPropertyCell(masm,
                                Handle<GlobalObject>::cast(current),
                                name,
                                scratch,
                                miss);
    }
    current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
  }
}


// Convert and store int passed in register ival to IEEE 754 single precision
// floating point value at memory location (dst + 4 * wordoffset).
// If VFP3 is available use it for conversion.
static void StoreIntAsFloat(MacroAssembler* masm,
                            Register dst,
                            Register wordoffset,
                            Register ival,
                            Register fval,
                            Register scratch1,
                            Register scratch2) {
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    __ vmov(s0, ival);
    __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
    __ vcvt_f32_s32(s0, s0);
    __ vstr(s0, scratch1, 0);
  } else {
    Label not_special, done;
    // Move sign bit from source to destination. This works because the sign
    // bit in the exponent word of the double has the same position and
    // polarity as the 2's complement sign bit in a Smi.
    ASSERT(kBinary32SignMask == 0x80000000u);

    __ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
    // Negate value if it is negative.
    __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne);

    // We have -1, 0 or 1, which we treat specially. Register ival contains
    // absolute value: it is either equal to 1 (special case of -1 and 1),
    // greater than 1 (not a special case) or less than 1 (special case of 0).
    __ cmp(ival, Operand(1));
    __ b(gt, &not_special);

    // For 1 or -1 we need to or in the 0 exponent (biased).
    static const uint32_t exponent_word_for_1 =
        kBinary32ExponentBias << kBinary32ExponentShift;

    __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
    __ b(&done);

    __ bind(&not_special);
    // Count leading zeros.
    // Gets the wrong answer for 0, but we already checked for that case above.
    Register zeros = scratch2;
    __ CountLeadingZeros(zeros, ival, scratch1);

    // Compute exponent and or it into the exponent register.
    __ rsb(scratch1,
           zeros,
           Operand((kBitsPerInt - 1) + kBinary32ExponentBias));

    __ orr(fval,
           fval,
           Operand(scratch1, LSL, kBinary32ExponentShift));

    // Shift up the source chopping the top bit off.
    __ add(zeros, zeros, Operand(1));
    // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
    __ mov(ival, Operand(ival, LSL, zeros));
    // Or in the top kBinary32MantissaBits bits of ival as the mantissa.
    __ orr(fval,
           fval,
           Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));

    __ bind(&done);
    __ str(fval, MemOperand(dst, wordoffset, LSL, 2));
  }
}


// Convert unsigned integer with specified number of leading zeroes in binary
// representation to IEEE 754 double.
// Integer to convert is passed in register hiword.
// Resulting double is returned in registers hiword:loword.
// This function does not work correctly for 0.
static void GenerateUInt2Double(MacroAssembler* masm,
                                Register hiword,
                                Register loword,
                                Register scratch,
                                int leading_zeroes) {
  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
  const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;

  const int mantissa_shift_for_hi_word =
      meaningful_bits - HeapNumber::kMantissaBitsInTopWord;

  const int mantissa_shift_for_lo_word =
      kBitsPerInt - mantissa_shift_for_hi_word;

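  // Example: with leading_zeroes == 0, meaningful_bits is 31: the mantissa
  // shift for the high word is 11, and the integer's leading 1 bit lands on
  // the exponent's least significant bit. The fix-up at the end of this
  // function clears that bit again whenever the biased exponent is even.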
  __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
  if (mantissa_shift_for_hi_word > 0) {
    __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
    __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
  } else {
    __ mov(loword, Operand(0, RelocInfo::NONE));
    __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
  }

  // If least significant bit of biased exponent was not 1 it was corrupted
  // by most significant bit of mantissa so we should fix that.
  if (!(biased_exponent & 1)) {
    __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
  }
}


#undef __
#define __ ACCESS_MASM(masm())


Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
                                       Register object_reg,
                                       Handle<JSObject> holder,
                                       Register holder_reg,
                                       Register scratch1,
                                       Register scratch2,
                                       Handle<String> name,
                                       int save_at_depth,
                                       Label* miss) {
  // Make sure there's no overlap between holder and object registers.
  ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
  ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
         && !scratch2.is(scratch1));

  // Keep track of the current object in register reg.
  Register reg = object_reg;
  int depth = 0;

  if (save_at_depth == depth) {
    __ str(reg, MemOperand(sp));
  }

  // Check the maps in the prototype chain.
  // Traverse the prototype chain from the object and do map checks.
  Handle<JSObject> current = object;
  while (!current.is_identical_to(holder)) {
    ++depth;

    // Only global objects and objects that do not require access
    // checks are allowed in stubs.
    ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());

    Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
    if (!current->HasFastProperties() &&
        !current->IsJSGlobalObject() &&
        !current->IsJSGlobalProxy()) {
      if (!name->IsSymbol()) {
        name = factory()->LookupSymbol(name);
      }
      ASSERT(current->property_dictionary()->FindEntry(*name) ==
             StringDictionary::kNotFound);

      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
                                       scratch1, scratch2);

      __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
      reg = holder_reg;  // From now on the object will be in holder_reg.
      __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
    } else {
      Handle<Map> current_map(current->map());
      __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK,
                  ALLOW_ELEMENT_TRANSITION_MAPS);

      // Check access rights to the global object. This has to happen after
      // the map check so that we know that the object is actually a global
      // object.
      if (current->IsJSGlobalProxy()) {
        __ CheckAccessGlobalProxy(reg, scratch2, miss);
      }
      reg = holder_reg;  // From now on the object will be in holder_reg.

      if (heap()->InNewSpace(*prototype)) {
        // The prototype is in new space; we cannot store a reference to it
        // in the code. Load it from the map.
        __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
      } else {
        // The prototype is in old space; load it directly.
        __ mov(reg, Operand(prototype));
      }
    }

    if (save_at_depth == depth) {
      __ str(reg, MemOperand(sp));
    }

    // Go to the next object in the prototype chain.
    current = prototype;
  }

  // Log the check depth.
  LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));

  // Check the holder map.
  __ CheckMap(reg, scratch1, Handle<Map>(current->map()), miss,
              DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);

  // Perform security check for access to the global object.
  ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
  if (holder->IsJSGlobalProxy()) {
    __ CheckAccessGlobalProxy(reg, scratch1, miss);
  }

  // If we've skipped any global objects, it's not enough to verify that
  // their maps haven't changed. We also need to check that the property
  // cell for the property is still empty.
  GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);

  // Return the register containing the holder.
  return reg;
}


void StubCompiler::GenerateLoadField(Handle<JSObject> object,
                                     Handle<JSObject> holder,
                                     Register receiver,
                                     Register scratch1,
                                     Register scratch2,
                                     Register scratch3,
                                     int index,
                                     Handle<String> name,
                                     Label* miss) {
  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss);

  // Check that the maps haven't changed.
  Register reg = CheckPrototypes(
      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
  GenerateFastPropertyLoad(masm(), r0, reg, holder, index);
  __ Ret();
}


void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
                                        Handle<JSObject> holder,
                                        Register receiver,
                                        Register scratch1,
                                        Register scratch2,
                                        Register scratch3,
                                        Handle<JSFunction> value,
                                        Handle<String> name,
                                        Label* miss) {
  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss);

  // Check that the maps haven't changed.
  CheckPrototypes(
      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);

  // Return the constant value.
  __ LoadHeapObject(r0, value);
  __ Ret();
}


void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
                                        Handle<JSObject> holder,
                                        Register receiver,
                                        Register name_reg,
                                        Register scratch1,
                                        Register scratch2,
                                        Register scratch3,
                                        Handle<AccessorInfo> callback,
                                        Handle<String> name,
                                        Label* miss) {
  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss);

  // Check that the maps haven't changed.
  Register reg = CheckPrototypes(object, receiver, holder, scratch1,
                                 scratch2, scratch3, name, miss);

  // Build AccessorInfo::args_ list on the stack and push property name below
  // the exit frame to make GC aware of them and store pointers to them.
  __ push(receiver);
  __ mov(scratch2, sp);  // scratch2 = AccessorInfo::args_
  if (heap()->InNewSpace(callback->data())) {
    __ Move(scratch3, callback);
    __ ldr(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
  } else {
    __ Move(scratch3, Handle<Object>(callback->data()));
  }
  __ Push(reg, scratch3, name_reg);
  __ mov(r0, sp);  // r0 = Handle<String>

  const int kApiStackSpace = 1;
  FrameScope frame_scope(masm(), StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  // Create AccessorInfo instance on the stack above the exit frame with
  // scratch2 (internal::Object** args_) as the data.
  __ str(scratch2, MemOperand(sp, 1 * kPointerSize));
  __ add(r1, sp, Operand(1 * kPointerSize));  // r1 = AccessorInfo&

  const int kStackUnwindSpace = 4;
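  // The four words pushed above (receiver, holder, callback data and name)
  // are unwound when the getter returns.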
  Address getter_address = v8::ToCData<Address>(callback->getter());
  ApiFunction fun(getter_address);
  ExternalReference ref =
      ExternalReference(&fun,
                        ExternalReference::DIRECT_GETTER_CALL,
                        masm()->isolate());
  __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
}


void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
                                           Handle<JSObject> interceptor_holder,
                                           LookupResult* lookup,
                                           Register receiver,
                                           Register name_reg,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Handle<String> name,
                                           Label* miss) {
  ASSERT(interceptor_holder->HasNamedInterceptor());
  ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss);

  // So far the most popular follow ups for interceptor loads are FIELD
  // and CALLBACKS, so inline only them, other cases may be added
  // later.
  bool compile_followup_inline = false;
  if (lookup->IsFound() && lookup->IsCacheable()) {
    if (lookup->type() == FIELD) {
      compile_followup_inline = true;
    } else if (lookup->type() == CALLBACKS &&
               lookup->GetCallbackObject()->IsAccessorInfo()) {
      compile_followup_inline =
          AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
    }
  }

  if (compile_followup_inline) {
    // Compile the interceptor call, followed by inline code to load the
    // property from further up the prototype chain if the call fails.
    // Check that the maps haven't changed.
    Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
                                          scratch1, scratch2, scratch3,
                                          name, miss);
    ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));

    // Preserve the receiver register explicitly whenever it is different from
    // the holder and it is needed should the interceptor return without any
    // result. The CALLBACKS case needs the receiver to be passed into C++ code,
    // the FIELD case might cause a miss during the prototype check.
    bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
    bool must_preserve_receiver_reg = !receiver.is(holder_reg) &&
        (lookup->type() == CALLBACKS || must_perform_prototype_check);

    // Save necessary data before invoking an interceptor.
    // Requires a frame to make GC aware of pushed pointers.
    {
      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
      if (must_preserve_receiver_reg) {
        __ Push(receiver, holder_reg, name_reg);
      } else {
        __ Push(holder_reg, name_reg);
      }
      // Invoke an interceptor. Note: map checks from receiver to
      // interceptor's holder has been compiled before (see a caller
      // of this method.)
      CompileCallLoadPropertyWithInterceptor(masm(),
                                             receiver,
                                             holder_reg,
                                             name_reg,
                                             interceptor_holder);
      // Check if interceptor provided a value for property. If it's
      // the case, return immediately.
      Label interceptor_failed;
      __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
      __ cmp(r0, scratch1);
      __ b(eq, &interceptor_failed);
      frame_scope.GenerateLeaveFrame();
      __ Ret();

      __ bind(&interceptor_failed);
      __ pop(name_reg);
      __ pop(holder_reg);
      if (must_preserve_receiver_reg) {
        __ pop(receiver);
      }
      // Leave the internal frame.
    }
    // Check that the maps from interceptor's holder to lookup's holder
    // haven't changed. And load lookup's holder into |holder| register.
    if (must_perform_prototype_check) {
1314 holder_reg = CheckPrototypes(interceptor_holder,
1315 holder_reg,
1316 Handle<JSObject>(lookup->holder()),
1317 scratch1,
1318 scratch2,
1319 scratch3,
1320 name,
1321 miss);
1322 }
1323
1324 if (lookup->type() == FIELD) {
1325 // We found FIELD property in prototype chain of interceptor's holder.
1326 // Retrieve a field from field's holder.
1327 GenerateFastPropertyLoad(masm(), r0, holder_reg,
1328 Handle<JSObject>(lookup->holder()),
1329 lookup->GetFieldIndex());
1330 __ Ret();
1331 } else {
1332 // We found CALLBACKS property in prototype chain of interceptor's
1333 // holder.
1334 ASSERT(lookup->type() == CALLBACKS);
1335 Handle<AccessorInfo> callback(
1336 AccessorInfo::cast(lookup->GetCallbackObject()));
1337 ASSERT(callback->getter() != NULL);
1338
1339 // Tail call to runtime.
1340 // Important invariant in CALLBACKS case: the code above must be
1341 // structured to never clobber |receiver| register.
1342 __ Move(scratch2, callback);
1343 // holder_reg is either receiver or scratch1.
1344 if (!receiver.is(holder_reg)) {
1345 ASSERT(scratch1.is(holder_reg));
1346 __ Push(receiver, holder_reg);
1347 __ ldr(scratch3,
1348 FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
1349 __ Push(scratch3, scratch2, name_reg);
1350 } else {
1351 __ push(receiver);
1352 __ ldr(scratch3,
1353 FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
1354 __ Push(holder_reg, scratch3, scratch2, name_reg);
1355 }
1356
1357 ExternalReference ref =
1358 ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
1359 masm()->isolate());
1360 __ TailCallExternalReference(ref, 5, 1);
1361 }
1362 } else { // !compile_followup_inline
1363 // Call the runtime system to load the interceptor.
1364 // Check that the maps haven't changed.
1365 Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
1366 scratch1, scratch2, scratch3,
1367 name, miss);
1368 PushInterceptorArguments(masm(), receiver, holder_reg,
1369 name_reg, interceptor_holder);
1370
1371 ExternalReference ref =
1372 ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
1373 masm()->isolate());
1374 __ TailCallExternalReference(ref, 5, 1);
1375 }
1376 }
1377
1378
GenerateNameCheck(Handle<String> name,Label * miss)1379 void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
1380 if (kind_ == Code::KEYED_CALL_IC) {
1381 __ cmp(r2, Operand(name));
1382 __ b(ne, miss);
1383 }
1384 }
1385
1386
GenerateGlobalReceiverCheck(Handle<JSObject> object,Handle<JSObject> holder,Handle<String> name,Label * miss)1387 void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
1388 Handle<JSObject> holder,
1389 Handle<String> name,
1390 Label* miss) {
1391 ASSERT(holder->IsGlobalObject());
1392
1393 // Get the number of arguments.
1394 const int argc = arguments().immediate();
1395
1396 // Get the receiver from the stack.
1397 __ ldr(r0, MemOperand(sp, argc * kPointerSize));
1398
1399 // Check that the maps haven't changed.
1400 __ JumpIfSmi(r0, miss);
1401 CheckPrototypes(object, r0, holder, r3, r1, r4, name, miss);
1402 }
1403
1404
GenerateLoadFunctionFromCell(Handle<JSGlobalPropertyCell> cell,Handle<JSFunction> function,Label * miss)1405 void CallStubCompiler::GenerateLoadFunctionFromCell(
1406 Handle<JSGlobalPropertyCell> cell,
1407 Handle<JSFunction> function,
1408 Label* miss) {
1409 // Get the value from the cell.
1410 __ mov(r3, Operand(cell));
1411 __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
1412
1413 // Check that the cell contains the same function.
1414 if (heap()->InNewSpace(*function)) {
1415 // We can't embed a pointer to a function in new space so we have
1416 // to verify that the shared function info is unchanged. This has
1417 // the nice side effect that multiple closures based on the same
1418 // function can all use this call IC. Before we load through the
1419 // function, we have to verify that it still is a function.
1420 __ JumpIfSmi(r1, miss);
1421 __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
1422 __ b(ne, miss);
1423
1424 // Check the shared function info. Make sure it hasn't changed.
1425 __ Move(r3, Handle<SharedFunctionInfo>(function->shared()));
1426 __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
1427 __ cmp(r4, r3);
1428 } else {
1429 __ cmp(r1, Operand(function));
1430 }
1431 __ b(ne, miss);
1432 }
1433
1434
GenerateMissBranch()1435 void CallStubCompiler::GenerateMissBranch() {
1436 Handle<Code> code =
1437 isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
1438 kind_,
1439 extra_state_);
1440 __ Jump(code, RelocInfo::CODE_TARGET);
1441 }
1442
1443
CompileCallField(Handle<JSObject> object,Handle<JSObject> holder,int index,Handle<String> name)1444 Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
1445 Handle<JSObject> holder,
1446 int index,
1447 Handle<String> name) {
1448 // ----------- S t a t e -------------
1449 // -- r2 : name
1450 // -- lr : return address
1451 // -----------------------------------
1452 Label miss;
1453
1454 GenerateNameCheck(name, &miss);
1455
1456 const int argc = arguments().immediate();
1457
1458 // Get the receiver of the function from the stack into r0.
1459 __ ldr(r0, MemOperand(sp, argc * kPointerSize));
1460 // Check that the receiver isn't a smi.
1461 __ JumpIfSmi(r0, &miss);
1462
1463 // Do the right check and compute the holder register.
1464 Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
1465 GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
1466
1467 GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
1468
1469 // Handle call cache miss.
1470 __ bind(&miss);
1471 GenerateMissBranch();
1472
1473 // Return the generated code.
1474 return GetCode(FIELD, name);
1475 }
1476
1477
1478 Handle<Code> CallStubCompiler::CompileArrayPushCall(
1479 Handle<Object> object,
1480 Handle<JSObject> holder,
1481 Handle<JSGlobalPropertyCell> cell,
1482 Handle<JSFunction> function,
1483 Handle<String> name) {
1484 // ----------- S t a t e -------------
1485 // -- r2 : name
1486 // -- lr : return address
1487 // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
1488 // -- ...
1489 // -- sp[argc * 4] : receiver
1490 // -----------------------------------
1491
1492 // If object is not an array, bail out to regular call.
1493 if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
1494
1495 Label miss;
1496 GenerateNameCheck(name, &miss);
1497
1498 Register receiver = r1;
1499 // Get the receiver from the stack
1500 const int argc = arguments().immediate();
1501 __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
1502
1503 // Check that the receiver isn't a smi.
1504 __ JumpIfSmi(receiver, &miss);
1505
1506 // Check that the maps haven't changed.
1507 CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, r3, r0, r4,
1508 name, &miss);
1509
1510 if (argc == 0) {
1511 // Nothing to do, just return the length.
1512 __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1513 __ Drop(argc + 1);
1514 __ Ret();
1515 } else {
1516 Label call_builtin;
1517
1518 if (argc == 1) { // Otherwise fall through to call the builtin.
1519 Label attempt_to_grow_elements;
1520
1521 Register elements = r6;
1522 Register end_elements = r5;
1523 // Get the elements array of the object.
1524 __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
1525
1526 // Check that the elements are in fast mode and writable.
1527 __ CheckMap(elements,
1528 r0,
1529 Heap::kFixedArrayMapRootIndex,
1530 &call_builtin,
1531 DONT_DO_SMI_CHECK);
1532
1534 // Get the array's length into r0 and calculate new length.
1535 __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1536 STATIC_ASSERT(kSmiTagSize == 1);
1537 STATIC_ASSERT(kSmiTag == 0);
1538 __ add(r0, r0, Operand(Smi::FromInt(argc)));
1539
1540 // Get the elements' length.
1541 __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
1542
1543 // Check if we could survive without allocation.
1544 __ cmp(r0, r4);
1545 __ b(gt, &attempt_to_grow_elements);
1546
1547 // Check if value is a smi.
1548 Label with_write_barrier;
1549 __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
1550 __ JumpIfNotSmi(r4, &with_write_barrier);
1551
1552 // Save new length.
1553 __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1554
1555 // Store the value.
1556 // We may need the slot address below, so use a PreIndex store that
1557 // writes it back into end_elements.
1558 __ add(end_elements, elements,
1559 Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
1560 const int kEndElementsOffset =
1561 FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
1562 __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
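// (Addressing sketch, for illustration: r0 holds the new length as a
// smi, i.e. the value shifted left by kSmiTagSize (1), so LSL by
// kPointerSizeLog2 - kSmiTagSize (2 - 1 = 1 on ARM) scales it to
// new_length * kPointerSize bytes. kEndElementsOffset then steps back
// from one past the new end to the slot of the first pushed argument,
// and the PreIndex store leaves that slot address in end_elements,
// which the write-barrier path below reuses for RecordWrite.)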
1563
1564 // Return the new length (still in r0).
1565 __ Drop(argc + 1);
1566 __ Ret();
1567
1568 __ bind(&with_write_barrier);
1569
1570 __ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
1571
1572 if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
1573 Label fast_object, not_fast_object;
1574 __ CheckFastObjectElements(r3, r7, &not_fast_object);
1575 __ jmp(&fast_object);
1576 // In case of fast smi-only, convert to fast object, otherwise bail out.
1577 __ bind(&not_fast_object);
1578 __ CheckFastSmiOnlyElements(r3, r7, &call_builtin);
1579 // r1: receiver
1580 // r3: map
1581 __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
1582 FAST_ELEMENTS,
1583 r3,
1584 r7,
1585 &call_builtin);
1586 __ mov(r2, receiver);
1587 ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
1588 __ bind(&fast_object);
1589 } else {
1590 __ CheckFastObjectElements(r3, r3, &call_builtin);
1591 }
1592
1593 // Save new length.
1594 __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1595
1596 // Store the value.
1597 // We need the slot address for the write barrier below, so use a
1598 // PreIndex store that writes it back into end_elements.
1599 __ add(end_elements, elements,
1600 Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
1601 __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
1602
1603 __ RecordWrite(elements,
1604 end_elements,
1605 r4,
1606 kLRHasNotBeenSaved,
1607 kDontSaveFPRegs,
1608 EMIT_REMEMBERED_SET,
1609 OMIT_SMI_CHECK);
1610 __ Drop(argc + 1);
1611 __ Ret();
1612
1613 __ bind(&attempt_to_grow_elements);
1614 // r0: array's length + 1.
1615 // r4: elements' length.
1616
1617 if (!FLAG_inline_new) {
1618 __ b(&call_builtin);
1619 }
1620
1621 __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
1622 // Growing elements that are SMI-only requires special handling in case
1623 // the new element is non-Smi. For now, delegate to the builtin.
1624 Label no_fast_elements_check;
1625 __ JumpIfSmi(r2, &no_fast_elements_check);
1626 __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset));
1627 __ CheckFastObjectElements(r7, r7, &call_builtin);
1628 __ bind(&no_fast_elements_check);
1629
1630 Isolate* isolate = masm()->isolate();
1631 ExternalReference new_space_allocation_top =
1632 ExternalReference::new_space_allocation_top_address(isolate);
1633 ExternalReference new_space_allocation_limit =
1634 ExternalReference::new_space_allocation_limit_address(isolate);
1635
1636 const int kAllocationDelta = 4;
1637 // Load top and check if it is the end of elements.
1638 __ add(end_elements, elements,
1639 Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
1640 __ add(end_elements, end_elements, Operand(kEndElementsOffset));
1641 __ mov(r7, Operand(new_space_allocation_top));
1642 __ ldr(r3, MemOperand(r7));
1643 __ cmp(end_elements, r3);
1644 __ b(ne, &call_builtin);
1645
1646 __ mov(r9, Operand(new_space_allocation_limit));
1647 __ ldr(r9, MemOperand(r9));
1648 __ add(r3, r3, Operand(kAllocationDelta * kPointerSize));
1649 __ cmp(r3, r9);
1650 __ b(hi, &call_builtin);
1651
1652 // There is room: grow the elements array in place.
1653 // Update new_space_allocation_top.
1654 __ str(r3, MemOperand(r7));
1655 // Push the argument.
1656 __ str(r2, MemOperand(end_elements));
1657 // Fill the rest with holes.
1658 __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
1659 for (int i = 1; i < kAllocationDelta; i++) {
1660 __ str(r3, MemOperand(end_elements, i * kPointerSize));
1661 }
1662
1663 // Update elements' and array's sizes.
1664 __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1665 __ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta)));
1666 __ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
1667
1668 // Elements are in new space, so write barrier is not required.
1669 __ Drop(argc + 1);
1670 __ Ret();
1671 }
1672 __ bind(&call_builtin);
1673 __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
1674 masm()->isolate()),
1675 argc + 1,
1676 1);
1677 }
1678
1679 // Handle call cache miss.
1680 __ bind(&miss);
1681 GenerateMissBranch();
1682
1683 // Return the generated code.
1684 return GetCode(function);
1685 }
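// (JS-level sketch of the fast path compiled above, for illustration:
//
//   var a = [1, 2, 3];
//   a.push(4);  // one argument, fast elements: handled inline
//
// With no arguments the stub just returns the length; with one argument
// it stores in place when capacity allows, or grows by kAllocationDelta
// slots when the elements array is the most recent new-space allocation
// (end_elements == new_space_allocation_top). Everything else falls
// through to the c_ArrayPush builtin.)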
1686
1687
1688 Handle<Code> CallStubCompiler::CompileArrayPopCall(
1689 Handle<Object> object,
1690 Handle<JSObject> holder,
1691 Handle<JSGlobalPropertyCell> cell,
1692 Handle<JSFunction> function,
1693 Handle<String> name) {
1694 // ----------- S t a t e -------------
1695 // -- r2 : name
1696 // -- lr : return address
1697 // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
1698 // -- ...
1699 // -- sp[argc * 4] : receiver
1700 // -----------------------------------
1701
1702 // If object is not an array, bail out to regular call.
1703 if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
1704
1705 Label miss, return_undefined, call_builtin;
1706 Register receiver = r1;
1707 Register elements = r3;
1708 GenerateNameCheck(name, &miss);
1709
1710 // Get the receiver from the stack
1711 const int argc = arguments().immediate();
1712 __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
1713 // Check that the receiver isn't a smi.
1714 __ JumpIfSmi(receiver, &miss);
1715
1716 // Check that the maps haven't changed.
1717 CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
1718 r4, r0, name, &miss);
1719
1720 // Get the elements array of the object.
1721 __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
1722
1723 // Check that the elements are in fast mode and writable.
1724 __ CheckMap(elements,
1725 r0,
1726 Heap::kFixedArrayMapRootIndex,
1727 &call_builtin,
1728 DONT_DO_SMI_CHECK);
1729
1730 // Get the array's length into r4 and calculate new length.
1731 __ ldr(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
1732 __ sub(r4, r4, Operand(Smi::FromInt(1)), SetCC);
1733 __ b(lt, &return_undefined);
1734
1735 // Get the last element.
1736 __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
1737 STATIC_ASSERT(kSmiTagSize == 1);
1738 STATIC_ASSERT(kSmiTag == 0);
1739 // We can't address the last element in one operation. Compute the more
1740 // expensive shift first, and use an offset later on.
1741 __ add(elements, elements, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
1742 __ ldr(r0, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
1743 __ cmp(r0, r6);
1744 __ b(eq, &call_builtin);
1745
1746 // Set the array's length.
1747 __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
1748
1749 // Fill with the hole.
1750 __ str(r6, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
1751 __ Drop(argc + 1);
1752 __ Ret();
1753
1754 __ bind(&return_undefined);
1755 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
1756 __ Drop(argc + 1);
1757 __ Ret();
1758
1759 __ bind(&call_builtin);
1760 __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop,
1761 masm()->isolate()),
1762 argc + 1,
1763 1);
1764
1765 // Handle call cache miss.
1766 __ bind(&miss);
1767 GenerateMissBranch();
1768
1769 // Return the generated code.
1770 return GetCode(function);
1771 }
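// (JS-level sketch: this stub inlines a.pop() on fast-elements arrays.
// It decrements the length, returns the last element, and writes the
// hole into the vacated slot. An empty array returns undefined, and
// popping a slot that already holds the hole is delegated to the
// builtin.)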
1772
1773
1774 Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
1775 Handle<Object> object,
1776 Handle<JSObject> holder,
1777 Handle<JSGlobalPropertyCell> cell,
1778 Handle<JSFunction> function,
1779 Handle<String> name) {
1780 // ----------- S t a t e -------------
1781 // -- r2 : function name
1782 // -- lr : return address
1783 // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
1784 // -- ...
1785 // -- sp[argc * 4] : receiver
1786 // -----------------------------------
1787
1788 // If object is not a string, bail out to regular call.
1789 if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
1790
1791 const int argc = arguments().immediate();
1792 Label miss;
1793 Label name_miss;
1794 Label index_out_of_range;
1795 Label* index_out_of_range_label = &index_out_of_range;
1796
1797 if (kind_ == Code::CALL_IC &&
1798 (CallICBase::StringStubState::decode(extra_state_) ==
1799 DEFAULT_STRING_STUB)) {
1800 index_out_of_range_label = &miss;
1801 }
1802 GenerateNameCheck(name, &name_miss);
1803
1804 // Check that the maps starting from the prototype haven't changed.
1805 GenerateDirectLoadGlobalFunctionPrototype(masm(),
1806 Context::STRING_FUNCTION_INDEX,
1807 r0,
1808 &miss);
1809 ASSERT(!object.is_identical_to(holder));
1810 CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
1811 r0, holder, r1, r3, r4, name, &miss);
1812
1813 Register receiver = r1;
1814 Register index = r4;
1815 Register result = r0;
1816 __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
1817 if (argc > 0) {
1818 __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
1819 } else {
1820 __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
1821 }
1822
1823 StringCharCodeAtGenerator generator(receiver,
1824 index,
1825 result,
1826 &miss, // When not a string.
1827 &miss, // When not a number.
1828 index_out_of_range_label,
1829 STRING_INDEX_IS_NUMBER);
1830 generator.GenerateFast(masm());
1831 __ Drop(argc + 1);
1832 __ Ret();
1833
1834 StubRuntimeCallHelper call_helper;
1835 generator.GenerateSlow(masm(), call_helper);
1836
1837 if (index_out_of_range.is_linked()) {
1838 __ bind(&index_out_of_range);
1839 __ LoadRoot(r0, Heap::kNanValueRootIndex);
1840 __ Drop(argc + 1);
1841 __ Ret();
1842 }
1843
1844 __ bind(&miss);
1845 // Restore function name in r2.
1846 __ Move(r2, name);
1847 __ bind(&name_miss);
1848 GenerateMissBranch();
1849
1850 // Return the generated code.
1851 return GetCode(function);
1852 }
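// (Out-of-range behaviour differs between the two string stubs: for
// e.g. "abc".charCodeAt(10) the code above returns NaN, while charAt,
// compiled below, returns the empty string for the same case.)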
1853
1854
1855 Handle<Code> CallStubCompiler::CompileStringCharAtCall(
1856 Handle<Object> object,
1857 Handle<JSObject> holder,
1858 Handle<JSGlobalPropertyCell> cell,
1859 Handle<JSFunction> function,
1860 Handle<String> name) {
1861 // ----------- S t a t e -------------
1862 // -- r2 : function name
1863 // -- lr : return address
1864 // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
1865 // -- ...
1866 // -- sp[argc * 4] : receiver
1867 // -----------------------------------
1868
1869 // If object is not a string, bail out to regular call.
1870 if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
1871
1872 const int argc = arguments().immediate();
1873 Label miss;
1874 Label name_miss;
1875 Label index_out_of_range;
1876 Label* index_out_of_range_label = &index_out_of_range;
1877 if (kind_ == Code::CALL_IC &&
1878 (CallICBase::StringStubState::decode(extra_state_) ==
1879 DEFAULT_STRING_STUB)) {
1880 index_out_of_range_label = &miss;
1881 }
1882 GenerateNameCheck(name, &name_miss);
1883
1884 // Check that the maps starting from the prototype haven't changed.
1885 GenerateDirectLoadGlobalFunctionPrototype(masm(),
1886 Context::STRING_FUNCTION_INDEX,
1887 r0,
1888 &miss);
1889 ASSERT(!object.is_identical_to(holder));
1890 CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
1891 r0, holder, r1, r3, r4, name, &miss);
1892
1893 Register receiver = r0;
1894 Register index = r4;
1895 Register scratch = r3;
1896 Register result = r0;
1897 __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
1898 if (argc > 0) {
1899 __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
1900 } else {
1901 __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
1902 }
1903
1904 StringCharAtGenerator generator(receiver,
1905 index,
1906 scratch,
1907 result,
1908 &miss, // When not a string.
1909 &miss, // When not a number.
1910 index_out_of_range_label,
1911 STRING_INDEX_IS_NUMBER);
1912 generator.GenerateFast(masm());
1913 __ Drop(argc + 1);
1914 __ Ret();
1915
1916 StubRuntimeCallHelper call_helper;
1917 generator.GenerateSlow(masm(), call_helper);
1918
1919 if (index_out_of_range.is_linked()) {
1920 __ bind(&index_out_of_range);
1921 __ LoadRoot(r0, Heap::kEmptyStringRootIndex);
1922 __ Drop(argc + 1);
1923 __ Ret();
1924 }
1925
1926 __ bind(&miss);
1927 // Restore function name in r2.
1928 __ Move(r2, name);
1929 __ bind(&name_miss);
1930 GenerateMissBranch();
1931
1932 // Return the generated code.
1933 return GetCode(function);
1934 }
1935
1936
1937 Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
1938 Handle<Object> object,
1939 Handle<JSObject> holder,
1940 Handle<JSGlobalPropertyCell> cell,
1941 Handle<JSFunction> function,
1942 Handle<String> name) {
1943 // ----------- S t a t e -------------
1944 // -- r2 : function name
1945 // -- lr : return address
1946 // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
1947 // -- ...
1948 // -- sp[argc * 4] : receiver
1949 // -----------------------------------
1950
1951 const int argc = arguments().immediate();
1952
1953 // If the object is not a JSObject or we got an unexpected number of
1954 // arguments, bail out to the regular call.
1955 if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
1956
1957 Label miss;
1958 GenerateNameCheck(name, &miss);
1959
1960 if (cell.is_null()) {
1961 __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
1962
1963 STATIC_ASSERT(kSmiTag == 0);
1964 __ JumpIfSmi(r1, &miss);
1965
1966 CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
1967 name, &miss);
1968 } else {
1969 ASSERT(cell->value() == *function);
1970 GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
1971 &miss);
1972 GenerateLoadFunctionFromCell(cell, function, &miss);
1973 }
1974
1975 // Load the char code argument.
1976 Register code = r1;
1977 __ ldr(code, MemOperand(sp, 0 * kPointerSize));
1978
1979 // Check the code is a smi.
1980 Label slow;
1981 STATIC_ASSERT(kSmiTag == 0);
1982 __ JumpIfNotSmi(code, &slow);
1983
1984 // Convert the smi code to uint16.
1985 __ and_(code, code, Operand(Smi::FromInt(0xffff)));
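// (Smi arithmetic sketch: with kSmiTag == 0 and kSmiTagSize == 1,
// Smi::FromInt(0xffff) is 0x1fffe, so the AND masks the payload to 16
// bits while leaving the tag bit clear -- i.e. it computes ToUint16 of
// the argument directly on the tagged value, as String.fromCharCode
// requires.)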
1986
1987 StringCharFromCodeGenerator generator(code, r0);
1988 generator.GenerateFast(masm());
1989 __ Drop(argc + 1);
1990 __ Ret();
1991
1992 StubRuntimeCallHelper call_helper;
1993 generator.GenerateSlow(masm(), call_helper);
1994
1995 // Tail call the full function. We do not have to patch the receiver
1996 // because the function makes no use of it.
1997 __ bind(&slow);
1998 __ InvokeFunction(
1999 function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
2000
2001 __ bind(&miss);
2002 // r2: function name.
2003 GenerateMissBranch();
2004
2005 // Return the generated code.
2006 return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
2007 }
2008
2009
2010 Handle<Code> CallStubCompiler::CompileMathFloorCall(
2011 Handle<Object> object,
2012 Handle<JSObject> holder,
2013 Handle<JSGlobalPropertyCell> cell,
2014 Handle<JSFunction> function,
2015 Handle<String> name) {
2016 // ----------- S t a t e -------------
2017 // -- r2 : function name
2018 // -- lr : return address
2019 // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
2020 // -- ...
2021 // -- sp[argc * 4] : receiver
2022 // -----------------------------------
2023
2024 if (!CpuFeatures::IsSupported(VFP3)) {
2025 return Handle<Code>::null();
2026 }
2027
2028 CpuFeatures::Scope scope_vfp3(VFP3);
2029 const int argc = arguments().immediate();
2030 // If the object is not a JSObject or we got an unexpected number of
2031 // arguments, bail out to the regular call.
2032 if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
2033
2034 Label miss, slow;
2035 GenerateNameCheck(name, &miss);
2036
2037 if (cell.is_null()) {
2038 __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
2039 STATIC_ASSERT(kSmiTag == 0);
2040 __ JumpIfSmi(r1, &miss);
2041 CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
2042 name, &miss);
2043 } else {
2044 ASSERT(cell->value() == *function);
2045 GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
2046 &miss);
2047 GenerateLoadFunctionFromCell(cell, function, &miss);
2048 }
2049
2050 // Load the (only) argument into r0.
2051 __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
2052
2053 // If the argument is a smi, just return.
2054 STATIC_ASSERT(kSmiTag == 0);
2055 __ tst(r0, Operand(kSmiTagMask));
2056 __ Drop(argc + 1, eq);
2057 __ Ret(eq);
2058
2059 __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
2060
2061 Label wont_fit_smi, no_vfp_exception, restore_fpscr_and_return;
2062
2063 // VFP3 is available (checked above), so use FPU rounding with the RM
2064 // (round towards minus infinity) mode.
2065
2066 // Load the HeapNumber value.
2067 // We will need access to the value in the core registers, so we load it
2068 // with ldrd and move it to the fpu. It also spares a sub instruction for
2069 // updating the HeapNumber value address, as vldr expects a multiple
2070 // of 4 offset.
2071 __ Ldrd(r4, r5, FieldMemOperand(r0, HeapNumber::kValueOffset));
2072 __ vmov(d1, r4, r5);
2073
2074 // Backup FPSCR.
2075 __ vmrs(r3);
2076 // Set custom FPSCR:
2077 // - Set rounding mode to "Round towards Minus Infinity"
2078 // (i.e. bits [23:22] = 0b10).
2079 // - Clear vfp cumulative exception flags (bits [3:0]).
2080 // - Make sure Flush-to-zero mode control bit is unset (bit 24).
2081 __ bic(r9, r3,
2082 Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask));
2083 __ orr(r9, r9, Operand(kRoundToMinusInf));
2084 __ vmsr(r9);
2085
2086 // Convert the argument to an integer.
2087 __ vcvt_s32_f64(s0, d1, kFPSCRRounding);
2088
2089 // Use vcvt latency to start checking for special cases.
2090 // Get the argument exponent and clear the sign bit.
2091 __ bic(r6, r5, Operand(HeapNumber::kSignMask));
2092 __ mov(r6, Operand(r6, LSR, HeapNumber::kMantissaBitsInTopWord));
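// (r5 holds the upper word of the double: 1 sign bit, 11 exponent
// bits, then the top kMantissaBitsInTopWord (20) mantissa bits.
// Clearing the sign and shifting right by 20 therefore leaves the
// biased exponent in r6.)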
2093
2094 // Retrieve FPSCR and check for vfp exceptions.
2095 __ vmrs(r9);
2096 __ tst(r9, Operand(kVFPExceptionMask));
2097 __ b(&no_vfp_exception, eq);
2098
2099 // Check for NaN, Infinity, and -Infinity.
2100 // They are invariant through a Math.Floor call, so just
2101 // return the original argument.
2102 __ sub(r7, r6, Operand(HeapNumber::kExponentMask
2103 >> HeapNumber::kMantissaBitsInTopWord), SetCC);
2104 __ b(&restore_fpscr_and_return, eq);
2105 // We had an overflow or underflow in the conversion. Check if we
2106 // have a big exponent.
2107 __ cmp(r7, Operand(HeapNumber::kMantissaBits));
2108 // If greater or equal, the argument is already rounded and in r0.
2109 __ b(&restore_fpscr_and_return, ge);
2110 __ b(&wont_fit_smi);
2111
2112 __ bind(&no_vfp_exception);
2113 // Move the result back to general purpose register r0.
2114 __ vmov(r0, s0);
2115 // Check if the result fits into a smi.
2116 __ add(r1, r0, Operand(0x40000000), SetCC);
2117 __ b(&wont_fit_smi, mi);
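// (Smi range check sketch: valid smi payloads are [-2^30, 2^30 - 1].
// Adding 0x40000000 above maps exactly that range onto the
// non-negative 32-bit integers, so the result's sign bit (mi) is set
// iff the value does not fit in a smi; e.g. 2^30 + 0x40000000 wraps to
// -2^31.)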
2118 // Tag the result.
2119 STATIC_ASSERT(kSmiTag == 0);
2120 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
2121
2122 // Check for -0.
2123 __ cmp(r0, Operand(0, RelocInfo::NONE));
2124 __ b(&restore_fpscr_and_return, ne);
2125 // r5 already holds the HeapNumber exponent.
2126 __ tst(r5, Operand(HeapNumber::kSignMask));
2127 // If our HeapNumber is negative it was -0, so load its address and return.
2128 // Else r0 is loaded with 0, so we can also just return.
2129 __ ldr(r0, MemOperand(sp, 0 * kPointerSize), ne);
2130
2131 __ bind(&restore_fpscr_and_return);
2132 // Restore FPSCR and return.
2133 __ vmsr(r3);
2134 __ Drop(argc + 1);
2135 __ Ret();
2136
2137 __ bind(&wont_fit_smi);
2138 // Restore FPSCR and fall through to the slow case.
2139 __ vmsr(r3);
2140
2141 __ bind(&slow);
2142 // Tail call the full function. We do not have to patch the receiver
2143 // because the function makes no use of it.
2144 __ InvokeFunction(
2145 function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
2146
2147 __ bind(&miss);
2148 // r2: function name.
2149 GenerateMissBranch();
2150
2151 // Return the generated code.
2152 return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
2153 }
2154
2155
2156 Handle<Code> CallStubCompiler::CompileMathAbsCall(
2157 Handle<Object> object,
2158 Handle<JSObject> holder,
2159 Handle<JSGlobalPropertyCell> cell,
2160 Handle<JSFunction> function,
2161 Handle<String> name) {
2162 // ----------- S t a t e -------------
2163 // -- r2 : function name
2164 // -- lr : return address
2165 // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
2166 // -- ...
2167 // -- sp[argc * 4] : receiver
2168 // -----------------------------------
2169
2170 const int argc = arguments().immediate();
2171 // If the object is not a JSObject or we got an unexpected number of
2172 // arguments, bail out to the regular call.
2173 if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
2174
2175 Label miss;
2176 GenerateNameCheck(name, &miss);
2177 if (cell.is_null()) {
2178 __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
2179 STATIC_ASSERT(kSmiTag == 0);
2180 __ JumpIfSmi(r1, &miss);
2181 CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
2182 name, &miss);
2183 } else {
2184 ASSERT(cell->value() == *function);
2185 GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
2186 &miss);
2187 GenerateLoadFunctionFromCell(cell, function, &miss);
2188 }
2189
2190 // Load the (only) argument into r0.
2191 __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
2192
2193 // Check if the argument is a smi.
2194 Label not_smi;
2195 STATIC_ASSERT(kSmiTag == 0);
2196 __ JumpIfNotSmi(r0, &not_smi);
2197
2198 // Do bitwise not or do nothing depending on the sign of the
2199 // argument.
2200 __ eor(r1, r0, Operand(r0, ASR, kBitsPerInt - 1));
2201
2202 // Add 1 or do nothing depending on the sign of the argument.
2203 __ sub(r0, r1, Operand(r0, ASR, kBitsPerInt - 1), SetCC);
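// (Branchless abs sketch, applied to the tagged smi value: let
// m = x >> 31 (arithmetic shift, so m is -1 for negative x and 0
// otherwise). Then (x ^ m) - m equals x for x >= 0 and -x for x < 0;
// e.g. for x = -5: m = -1, x ^ m = 4, and 4 - (-1) = 5. Because smi
// tagging is linear (smi(v) == v << 1), this yields the smi of |v|
// directly; only the most negative smi overflows, which the mi branch
// below catches.)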
2204
2205 // If the result is still negative, go to the slow case.
2206 // This only happens for the most negative smi.
2207 Label slow;
2208 __ b(mi, &slow);
2209
2210 // Smi case done.
2211 __ Drop(argc + 1);
2212 __ Ret();
2213
2214 // Check if the argument is a heap number and load its exponent and
2215 // sign.
2216 __ bind(&not_smi);
2217 __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
2218 __ ldr(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
2219
2220 // Check the sign of the argument. If the argument is positive,
2221 // just return it.
2222 Label negative_sign;
2223 __ tst(r1, Operand(HeapNumber::kSignMask));
2224 __ b(ne, &negative_sign);
2225 __ Drop(argc + 1);
2226 __ Ret();
2227
2228 // If the argument is negative, clear the sign, and return a new
2229 // number.
2230 __ bind(&negative_sign);
2231 __ eor(r1, r1, Operand(HeapNumber::kSignMask));
2232 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
2233 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
2234 __ AllocateHeapNumber(r0, r4, r5, r6, &slow);
2235 __ str(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
2236 __ str(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
2237 __ Drop(argc + 1);
2238 __ Ret();
2239
2240 // Tail call the full function. We do not have to patch the receiver
2241 // because the function makes no use of it.
2242 __ bind(&slow);
2243 __ InvokeFunction(
2244 function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
2245
2246 __ bind(&miss);
2247 // r2: function name.
2248 GenerateMissBranch();
2249
2250 // Return the generated code.
2251 return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
2252 }
2253
2254
2255 Handle<Code> CallStubCompiler::CompileFastApiCall(
2256 const CallOptimization& optimization,
2257 Handle<Object> object,
2258 Handle<JSObject> holder,
2259 Handle<JSGlobalPropertyCell> cell,
2260 Handle<JSFunction> function,
2261 Handle<String> name) {
2262 Counters* counters = isolate()->counters();
2263
2264 ASSERT(optimization.is_simple_api_call());
2265 // Bail out if object is a global object, as we don't want to
2266 // repatch it to the global receiver.
2267 if (object->IsGlobalObject()) return Handle<Code>::null();
2268 if (!cell.is_null()) return Handle<Code>::null();
2269 if (!object->IsJSObject()) return Handle<Code>::null();
2270 int depth = optimization.GetPrototypeDepthOfExpectedType(
2271 Handle<JSObject>::cast(object), holder);
2272 if (depth == kInvalidProtoDepth) return Handle<Code>::null();
2273
2274 Label miss, miss_before_stack_reserved;
2275 GenerateNameCheck(name, &miss_before_stack_reserved);
2276
2277 // Get the receiver from the stack.
2278 const int argc = arguments().immediate();
2279 __ ldr(r1, MemOperand(sp, argc * kPointerSize));
2280
2281 // Check that the receiver isn't a smi.
2282 __ JumpIfSmi(r1, &miss_before_stack_reserved);
2283
2284 __ IncrementCounter(counters->call_const(), 1, r0, r3);
2285 __ IncrementCounter(counters->call_const_fast_api(), 1, r0, r3);
2286
2287 ReserveSpaceForFastApiCall(masm(), r0);
2288
2289 // Check that the maps haven't changed and find the holder as a side effect.
2290 CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, name,
2291 depth, &miss);
2292
2293 GenerateFastApiDirectCall(masm(), optimization, argc);
2294
2295 __ bind(&miss);
2296 FreeSpaceForFastApiCall(masm());
2297
2298 __ bind(&miss_before_stack_reserved);
2299 GenerateMissBranch();
2300
2301 // Return the generated code.
2302 return GetCode(function);
2303 }
2304
2305
2306 Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
2307 Handle<JSObject> holder,
2308 Handle<JSFunction> function,
2309 Handle<String> name,
2310 CheckType check) {
2311 // ----------- S t a t e -------------
2312 // -- r2 : name
2313 // -- lr : return address
2314 // -----------------------------------
2315 if (HasCustomCallGenerator(function)) {
2316 Handle<Code> code = CompileCustomCall(object, holder,
2317 Handle<JSGlobalPropertyCell>::null(),
2318 function, name);
2319 // A null handle means bail out to the regular compiler code below.
2320 if (!code.is_null()) return code;
2321 }
2322
2323 Label miss;
2324 GenerateNameCheck(name, &miss);
2325
2326 // Get the receiver from the stack
2327 const int argc = arguments().immediate();
2328 __ ldr(r1, MemOperand(sp, argc * kPointerSize));
2329
2330 // Check that the receiver isn't a smi.
2331 if (check != NUMBER_CHECK) {
2332 __ JumpIfSmi(r1, &miss);
2333 }
2334
2335 // Make sure that it's okay not to patch the on-stack receiver
2336 // unless we're doing a receiver map check.
2337 ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
2338 switch (check) {
2339 case RECEIVER_MAP_CHECK:
2340 __ IncrementCounter(masm()->isolate()->counters()->call_const(),
2341 1, r0, r3);
2342
2343 // Check that the maps haven't changed.
2344 CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
2345 name, &miss);
2346
2347 // Patch the receiver on the stack with the global proxy if
2348 // necessary.
2349 if (object->IsGlobalObject()) {
2350 __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
2351 __ str(r3, MemOperand(sp, argc * kPointerSize));
2352 }
2353 break;
2354
2355 case STRING_CHECK:
2356 if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
2357 // Check that the object is a string or a symbol.
2358 __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
2359 __ b(ge, &miss);
2360 // Check that the maps starting from the prototype haven't changed.
2361 GenerateDirectLoadGlobalFunctionPrototype(
2362 masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
2363 CheckPrototypes(
2364 Handle<JSObject>(JSObject::cast(object->GetPrototype())),
2365 r0, holder, r3, r1, r4, name, &miss);
2366 } else {
2367 // Calling non-strict non-builtins with a value as the receiver
2368 // requires boxing.
2369 __ jmp(&miss);
2370 }
2371 break;
2372
2373 case NUMBER_CHECK:
2374 if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
2375 Label fast;
2376 // Check that the object is a smi or a heap number.
2377 __ JumpIfSmi(r1, &fast);
2378 __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE);
2379 __ b(ne, &miss);
2380 __ bind(&fast);
2381 // Check that the maps starting from the prototype haven't changed.
2382 GenerateDirectLoadGlobalFunctionPrototype(
2383 masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
2384 CheckPrototypes(
2385 Handle<JSObject>(JSObject::cast(object->GetPrototype())),
2386 r0, holder, r3, r1, r4, name, &miss);
2387 } else {
2388 // Calling non-strict non-builtins with a value as the receiver
2389 // requires boxing.
2390 __ jmp(&miss);
2391 }
2392 break;
2393
2394 case BOOLEAN_CHECK:
2395 if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
2396 Label fast;
2397 // Check that the object is a boolean.
2398 __ LoadRoot(ip, Heap::kTrueValueRootIndex);
2399 __ cmp(r1, ip);
2400 __ b(eq, &fast);
2401 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
2402 __ cmp(r1, ip);
2403 __ b(ne, &miss);
2404 __ bind(&fast);
2405 // Check that the maps starting from the prototype haven't changed.
2406 GenerateDirectLoadGlobalFunctionPrototype(
2407 masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
2408 CheckPrototypes(
2409 Handle<JSObject>(JSObject::cast(object->GetPrototype())),
2410 r0, holder, r3, r1, r4, name, &miss);
2411 } else {
2412 // Calling non-strict non-builtins with a value as the receiver
2413 // requires boxing.
2414 __ jmp(&miss);
2415 }
2416 break;
2417 }
2418
2419 CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
2420 ? CALL_AS_FUNCTION
2421 : CALL_AS_METHOD;
2422 __ InvokeFunction(
2423 function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
2424
2425 // Handle call cache miss.
2426 __ bind(&miss);
2427 GenerateMissBranch();
2428
2429 // Return the generated code.
2430 return GetCode(function);
2431 }
2432
2433
2434 Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
2435 Handle<JSObject> holder,
2436 Handle<String> name) {
2437 // ----------- S t a t e -------------
2438 // -- r2 : name
2439 // -- lr : return address
2440 // -----------------------------------
2441 Label miss;
2442 GenerateNameCheck(name, &miss);
2443
2444 // Get the number of arguments.
2445 const int argc = arguments().immediate();
2446 LookupResult lookup(isolate());
2447 LookupPostInterceptor(holder, name, &lookup);
2448
2449 // Get the receiver from the stack.
2450 __ ldr(r1, MemOperand(sp, argc * kPointerSize));
2451
2452 CallInterceptorCompiler compiler(this, arguments(), r2, extra_state_);
2453 compiler.Compile(masm(), object, holder, name, &lookup, r1, r3, r4, r0,
2454 &miss);
2455
2456 // Move returned value, the function to call, to r1.
2457 __ mov(r1, r0);
2458 // Restore receiver.
2459 __ ldr(r0, MemOperand(sp, argc * kPointerSize));
2460
2461 GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
2462
2463 // Handle call cache miss.
2464 __ bind(&miss);
2465 GenerateMissBranch();
2466
2467 // Return the generated code.
2468 return GetCode(INTERCEPTOR, name);
2469 }
2470
2471
2472 Handle<Code> CallStubCompiler::CompileCallGlobal(
2473 Handle<JSObject> object,
2474 Handle<GlobalObject> holder,
2475 Handle<JSGlobalPropertyCell> cell,
2476 Handle<JSFunction> function,
2477 Handle<String> name) {
2478 // ----------- S t a t e -------------
2479 // -- r2 : name
2480 // -- lr : return address
2481 // -----------------------------------
2482 if (HasCustomCallGenerator(function)) {
2483 Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
2484 // A null handle means bail out to the regular compiler code below.
2485 if (!code.is_null()) return code;
2486 }
2487
2488 Label miss;
2489 GenerateNameCheck(name, &miss);
2490
2491 // Get the number of arguments.
2492 const int argc = arguments().immediate();
2493 GenerateGlobalReceiverCheck(object, holder, name, &miss);
2494 GenerateLoadFunctionFromCell(cell, function, &miss);
2495
2496 // Patch the receiver on the stack with the global proxy if
2497 // necessary.
2498 if (object->IsGlobalObject()) {
2499 __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
2500 __ str(r3, MemOperand(sp, argc * kPointerSize));
2501 }
2502
2503 // Set up the context (function already in r1).
2504 __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
2505
2506 // Jump to the cached code (tail call).
2507 Counters* counters = masm()->isolate()->counters();
2508 __ IncrementCounter(counters->call_global_inline(), 1, r3, r4);
2509 ParameterCount expected(function->shared()->formal_parameter_count());
2510 CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
2511 ? CALL_AS_FUNCTION
2512 : CALL_AS_METHOD;
2513 // We call indirectly through the code field in the function to
2514 // allow recompilation to take effect without changing any of the
2515 // call sites.
2516 __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
2517 __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION,
2518 NullCallWrapper(), call_kind);
2519
2520 // Handle call cache miss.
2521 __ bind(&miss);
2522 __ IncrementCounter(counters->call_global_inline_miss(), 1, r1, r3);
2523 GenerateMissBranch();
2524
2525 // Return the generated code.
2526 return GetCode(NORMAL, name);
2527 }
2528
2529
2530 Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
2531 int index,
2532 Handle<Map> transition,
2533 Handle<String> name) {
2534 // ----------- S t a t e -------------
2535 // -- r0 : value
2536 // -- r1 : receiver
2537 // -- r2 : name
2538 // -- lr : return address
2539 // -----------------------------------
2540 Label miss;
2541
2542 GenerateStoreField(masm(), object, index, transition, r1, r2, r3, &miss);
2543 __ bind(&miss);
2544 Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
2545 __ Jump(ic, RelocInfo::CODE_TARGET);
2546
2547 // Return the generated code.
2548 return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
2549 }
2550
2551
2552 Handle<Code> StoreStubCompiler::CompileStoreCallback(
2553 Handle<JSObject> object,
2554 Handle<AccessorInfo> callback,
2555 Handle<String> name) {
2556 // ----------- S t a t e -------------
2557 // -- r0 : value
2558 // -- r1 : receiver
2559 // -- r2 : name
2560 // -- lr : return address
2561 // -----------------------------------
2562 Label miss;
2563
2564 // Check that the map of the object hasn't changed.
2565 __ CheckMap(r1, r3, Handle<Map>(object->map()), &miss,
2566 DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
2567
2568 // Perform global security token check if needed.
2569 if (object->IsJSGlobalProxy()) {
2570 __ CheckAccessGlobalProxy(r1, r3, &miss);
2571 }
2572
2573 // Stub never generated for non-global objects that require access
2574 // checks.
2575 ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
2576
2577 __ push(r1); // receiver
2578 __ mov(ip, Operand(callback)); // callback info
2579 __ Push(ip, r2, r0);
2580
2581 // Do tail-call to the runtime system.
2582 ExternalReference store_callback_property =
2583 ExternalReference(IC_Utility(IC::kStoreCallbackProperty),
2584 masm()->isolate());
2585 __ TailCallExternalReference(store_callback_property, 4, 1);
2586
2587 // Handle store cache miss.
2588 __ bind(&miss);
2589 Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
2590 __ Jump(ic, RelocInfo::CODE_TARGET);
2591
2592 // Return the generated code.
2593 return GetCode(CALLBACKS, name);
2594 }
2595
2596
2597 Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
2598 Handle<JSObject> receiver,
2599 Handle<String> name) {
2600 // ----------- S t a t e -------------
2601 // -- r0 : value
2602 // -- r1 : receiver
2603 // -- r2 : name
2604 // -- lr : return address
2605 // -----------------------------------
2606 Label miss;
2607
2608 // Check that the map of the object hasn't changed.
2609 __ CheckMap(r1, r3, Handle<Map>(receiver->map()), &miss,
2610 DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
2611
2612 // Perform global security token check if needed.
2613 if (receiver->IsJSGlobalProxy()) {
2614 __ CheckAccessGlobalProxy(r1, r3, &miss);
2615 }
2616
2617 // Stub is never generated for non-global objects that require access
2618 // checks.
2619 ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
2620
2621 __ Push(r1, r2, r0); // Receiver, name, value.
2622
2623 __ mov(r0, Operand(Smi::FromInt(strict_mode_)));
2624 __ push(r0); // strict mode
2625
2626 // Do tail-call to the runtime system.
2627 ExternalReference store_ic_property =
2628 ExternalReference(IC_Utility(IC::kStoreInterceptorProperty),
2629 masm()->isolate());
2630 __ TailCallExternalReference(store_ic_property, 4, 1);
2631
2632 // Handle store cache miss.
2633 __ bind(&miss);
2634 Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
2635 __ Jump(ic, RelocInfo::CODE_TARGET);
2636
2637 // Return the generated code.
2638 return GetCode(INTERCEPTOR, name);
2639 }
2640
2641
2642 Handle<Code> StoreStubCompiler::CompileStoreGlobal(
2643 Handle<GlobalObject> object,
2644 Handle<JSGlobalPropertyCell> cell,
2645 Handle<String> name) {
2646 // ----------- S t a t e -------------
2647 // -- r0 : value
2648 // -- r1 : receiver
2649 // -- r2 : name
2650 // -- lr : return address
2651 // -----------------------------------
2652 Label miss;
2653
2654 // Check that the map of the global has not changed.
2655 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
2656 __ cmp(r3, Operand(Handle<Map>(object->map())));
2657 __ b(ne, &miss);
2658
2659 // Check that the value in the cell is not the hole. If it is, this
2660 // cell could have been deleted and reintroducing the global needs
2661 // to update the property details in the property dictionary of the
2662 // global object. We bail out to the runtime system to do that.
2663 __ mov(r4, Operand(cell));
2664 __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
2665 __ ldr(r6, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
2666 __ cmp(r5, r6);
2667 __ b(eq, &miss);
2668
2669 // Store the value in the cell.
2670 __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
2671 // Cells are always rescanned, so no write barrier here.
2672
2673 Counters* counters = masm()->isolate()->counters();
2674 __ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3);
2675 __ Ret();
2676
2677 // Handle store cache miss.
2678 __ bind(&miss);
2679 __ IncrementCounter(counters->named_store_global_inline_miss(), 1, r4, r3);
2680 Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
2681 __ Jump(ic, RelocInfo::CODE_TARGET);
2682
2683 // Return the generated code.
2684 return GetCode(NORMAL, name);
2685 }
2686
2687
2688 Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
2689 Handle<JSObject> object,
2690 Handle<JSObject> last) {
2691 // ----------- S t a t e -------------
2692 // -- r0 : receiver
2693 // -- lr : return address
2694 // -----------------------------------
2695 Label miss;
2696
2697 // Check that receiver is not a smi.
2698 __ JumpIfSmi(r0, &miss);
2699
2700 // Check the maps of the full prototype chain.
2701 CheckPrototypes(object, r0, last, r3, r1, r4, name, &miss);
2702
2703 // If the last object in the prototype chain is a global object,
2704 // check that the global property cell is empty.
2705 if (last->IsGlobalObject()) {
2706 GenerateCheckPropertyCell(
2707 masm(), Handle<GlobalObject>::cast(last), name, r1, &miss);
2708 }
2709
2710 // Return undefined if maps of the full prototype chain are still the
2711 // same and no global property with this name contains a value.
2712 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2713 __ Ret();
2714
2715 __ bind(&miss);
2716 GenerateLoadMiss(masm(), Code::LOAD_IC);
2717
2718 // Return the generated code.
2719 return GetCode(NONEXISTENT, factory()->empty_string());
2720 }
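// (Illustration: a load like o.missing can be handled by this stub
// once the IC has observed the negative lookup. The stub proves the
// property is absent along the whole prototype chain -- and, when the
// last object is a global, that no property cell exists for the name
// -- and then returns undefined without calling the runtime.)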
2721
2722
2723 Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
2724 Handle<JSObject> holder,
2725 int index,
2726 Handle<String> name) {
2727 // ----------- S t a t e -------------
2728 // -- r0 : receiver
2729 // -- r2 : name
2730 // -- lr : return address
2731 // -----------------------------------
2732 Label miss;
2733
2734 GenerateLoadField(object, holder, r0, r3, r1, r4, index, name, &miss);
2735 __ bind(&miss);
2736 GenerateLoadMiss(masm(), Code::LOAD_IC);
2737
2738 // Return the generated code.
2739 return GetCode(FIELD, name);
2740 }
2741
2742
2743 Handle<Code> LoadStubCompiler::CompileLoadCallback(
2744 Handle<String> name,
2745 Handle<JSObject> object,
2746 Handle<JSObject> holder,
2747 Handle<AccessorInfo> callback) {
2748 // ----------- S t a t e -------------
2749 // -- r0 : receiver
2750 // -- r2 : name
2751 // -- lr : return address
2752 // -----------------------------------
2753 Label miss;
2754 GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, callback, name,
2755 &miss);
2756 __ bind(&miss);
2757 GenerateLoadMiss(masm(), Code::LOAD_IC);
2758
2759 // Return the generated code.
2760 return GetCode(CALLBACKS, name);
2761 }
2762
2763
2764 Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
2765 Handle<JSObject> holder,
2766 Handle<JSFunction> value,
2767 Handle<String> name) {
2768 // ----------- S t a t e -------------
2769 // -- r0 : receiver
2770 // -- r2 : name
2771 // -- lr : return address
2772 // -----------------------------------
2773 Label miss;
2774
2775 GenerateLoadConstant(object, holder, r0, r3, r1, r4, value, name, &miss);
2776 __ bind(&miss);
2777 GenerateLoadMiss(masm(), Code::LOAD_IC);
2778
2779 // Return the generated code.
2780 return GetCode(CONSTANT_FUNCTION, name);
2781 }
2782
2783
2784 Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
2785 Handle<JSObject> holder,
2786 Handle<String> name) {
2787 // ----------- S t a t e -------------
2788 // -- r0 : receiver
2789 // -- r2 : name
2790 // -- lr : return address
2791 // -----------------------------------
2792 Label miss;
2793
2794 LookupResult lookup(isolate());
2795 LookupPostInterceptor(holder, name, &lookup);
2796 GenerateLoadInterceptor(object, holder, &lookup, r0, r2, r3, r1, r4, name,
2797 &miss);
2798 __ bind(&miss);
2799 GenerateLoadMiss(masm(), Code::LOAD_IC);
2800
2801 // Return the generated code.
2802 return GetCode(INTERCEPTOR, name);
2803 }
2804
2805
2806 Handle<Code> LoadStubCompiler::CompileLoadGlobal(
2807 Handle<JSObject> object,
2808 Handle<GlobalObject> holder,
2809 Handle<JSGlobalPropertyCell> cell,
2810 Handle<String> name,
2811 bool is_dont_delete) {
2812 // ----------- S t a t e -------------
2813 // -- r0 : receiver
2814 // -- r2 : name
2815 // -- lr : return address
2816 // -----------------------------------
2817 Label miss;
2818
2819 // Check that the map of the global has not changed.
2820 __ JumpIfSmi(r0, &miss);
2821 CheckPrototypes(object, r0, holder, r3, r4, r1, name, &miss);
2822
2823 // Get the value from the cell.
2824 __ mov(r3, Operand(cell));
2825 __ ldr(r4, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
2826
2827 // Check for deleted property if property can actually be deleted.
2828 if (!is_dont_delete) {
2829 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2830 __ cmp(r4, ip);
2831 __ b(eq, &miss);
2832 }
2833
2834 __ mov(r0, r4);
2835 Counters* counters = masm()->isolate()->counters();
2836 __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
2837 __ Ret();
2838
2839 __ bind(&miss);
2840 __ IncrementCounter(counters->named_load_global_stub_miss(), 1, r1, r3);
2841 GenerateLoadMiss(masm(), Code::LOAD_IC);
2842
2843 // Return the generated code.
2844 return GetCode(NORMAL, name);
2845 }
2846
2847
2848 Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
2849 Handle<JSObject> receiver,
2850 Handle<JSObject> holder,
2851 int index) {
2852 // ----------- S t a t e -------------
2853 // -- lr : return address
2854 // -- r0 : key
2855 // -- r1 : receiver
2856 // -----------------------------------
2857 Label miss;
2858
2859 // Check the key is the cached one.
2860 __ cmp(r0, Operand(name));
2861 __ b(ne, &miss);
2862
2863 GenerateLoadField(receiver, holder, r1, r2, r3, r4, index, name, &miss);
2864 __ bind(&miss);
2865 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
2866
2867 return GetCode(FIELD, name);
2868 }
2869
2870
2871 Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
2872 Handle<String> name,
2873 Handle<JSObject> receiver,
2874 Handle<JSObject> holder,
2875 Handle<AccessorInfo> callback) {
2876 // ----------- S t a t e -------------
2877 // -- lr : return address
2878 // -- r0 : key
2879 // -- r1 : receiver
2880 // -----------------------------------
2881 Label miss;
2882
2883 // Check the key is the cached one.
2884 __ cmp(r0, Operand(name));
2885 __ b(ne, &miss);
2886
2887 GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, callback, name,
2888 &miss);
2889 __ bind(&miss);
2890 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
2891
2892 return GetCode(CALLBACKS, name);
2893 }
2894
2895
2896 Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
2897 Handle<String> name,
2898 Handle<JSObject> receiver,
2899 Handle<JSObject> holder,
2900 Handle<JSFunction> value) {
2901 // ----------- S t a t e -------------
2902 // -- lr : return address
2903 // -- r0 : key
2904 // -- r1 : receiver
2905 // -----------------------------------
2906 Label miss;
2907
2908 // Check the key is the cached one.
2909 __ cmp(r0, Operand(name));
2910 __ b(ne, &miss);
2911
2912 GenerateLoadConstant(receiver, holder, r1, r2, r3, r4, value, name, &miss);
2913 __ bind(&miss);
2914 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
2915
2916 // Return the generated code.
2917 return GetCode(CONSTANT_FUNCTION, name);
2918 }
2919
2920
2921 Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
2922 Handle<JSObject> receiver,
2923 Handle<JSObject> holder,
2924 Handle<String> name) {
2925 // ----------- S t a t e -------------
2926 // -- lr : return address
2927 // -- r0 : key
2928 // -- r1 : receiver
2929 // -----------------------------------
2930 Label miss;
2931
2932 // Check the key is the cached one.
2933 __ cmp(r0, Operand(name));
2934 __ b(ne, &miss);
2935
2936 LookupResult lookup(isolate());
2937 LookupPostInterceptor(holder, name, &lookup);
2938 GenerateLoadInterceptor(receiver, holder, &lookup, r1, r0, r2, r3, r4, name,
2939 &miss);
2940 __ bind(&miss);
2941 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
2942
2943 return GetCode(INTERCEPTOR, name);
2944 }
2945
2946
2947 Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
2948 Handle<String> name) {
2949 // ----------- S t a t e -------------
2950 // -- lr : return address
2951 // -- r0 : key
2952 // -- r1 : receiver
2953 // -----------------------------------
2954 Label miss;
2955
2956 // Check the key is the cached one.
2957 __ cmp(r0, Operand(name));
2958 __ b(ne, &miss);
2959
2960 GenerateLoadArrayLength(masm(), r1, r2, &miss);
2961 __ bind(&miss);
2962 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
2963
2964 return GetCode(CALLBACKS, name);
2965 }
2966
2967
2968 Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
2969 Handle<String> name) {
2970 // ----------- S t a t e -------------
2971 // -- lr : return address
2972 // -- r0 : key
2973 // -- r1 : receiver
2974 // -----------------------------------
2975 Label miss;
2976
2977 Counters* counters = masm()->isolate()->counters();
2978 __ IncrementCounter(counters->keyed_load_string_length(), 1, r2, r3);
2979
2980 // Check the key is the cached one.
2981 __ cmp(r0, Operand(name));
2982 __ b(ne, &miss);
2983
2984 GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true);
2985 __ bind(&miss);
2986 __ DecrementCounter(counters->keyed_load_string_length(), 1, r2, r3);
2987
2988 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
2989
2990 return GetCode(CALLBACKS, name);
2991 }
2992
2993
2994 Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
2995 Handle<String> name) {
2996 // ----------- S t a t e -------------
2997 // -- lr : return address
2998 // -- r0 : key
2999 // -- r1 : receiver
3000 // -----------------------------------
3001 Label miss;
3002
3003 Counters* counters = masm()->isolate()->counters();
3004 __ IncrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3);
3005
3006 // Check the name hasn't changed.
3007 __ cmp(r0, Operand(name));
3008 __ b(ne, &miss);
3009
3010 GenerateLoadFunctionPrototype(masm(), r1, r2, r3, &miss);
3011 __ bind(&miss);
3012 __ DecrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3);
3013 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
3014
3015 return GetCode(CALLBACKS, name);
3016 }
3017
3018
3019 Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
3020 Handle<Map> receiver_map) {
3021 // ----------- S t a t e -------------
3022 // -- lr : return address
3023 // -- r0 : key
3024 // -- r1 : receiver
3025 // -----------------------------------
3026 ElementsKind elements_kind = receiver_map->elements_kind();
3027 Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
3028
3029 __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
3030
3031 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
3032 __ Jump(ic, RelocInfo::CODE_TARGET);
3033
3034 // Return the generated code.
3035 return GetCode(NORMAL, factory()->empty_string());
3036 }
3037
3038
3039 Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
3040 MapHandleList* receiver_maps,
3041 CodeHandleList* handler_ics) {
3042 // ----------- S t a t e -------------
3043 // -- lr : return address
3044 // -- r0 : key
3045 // -- r1 : receiver
3046 // -----------------------------------
3047 Label miss;
3048 __ JumpIfSmi(r1, &miss);
3049
3050 int receiver_count = receiver_maps->length();
3051 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
3052 for (int current = 0; current < receiver_count; ++current) {
3053 __ mov(ip, Operand(receiver_maps->at(current)));
3054 __ cmp(r2, ip);
3055 __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET, eq);
3056 }
3057
3058 __ bind(&miss);
3059 Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss();
3060 __ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
3061
3062 // Return the generated code.
3063 return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
3064 }
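// (Dispatch sketch: the polymorphic stub is a linear chain of map
// compares -- load the receiver map once, then for each (map, handler)
// pair jump to the handler on equality, falling through to the generic
// miss at the end. The cost is linear in the number of maps, and the
// stub is registered with the MEGAMORPHIC state above.)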
3065
3066
3067 Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
3068 int index,
3069 Handle<Map> transition,
3070 Handle<String> name) {
3071 // ----------- S t a t e -------------
3072 // -- r0 : value
3073 // -- r1 : name
3074 // -- r2 : receiver
3075 // -- lr : return address
3076 // -----------------------------------
3077 Label miss;
3078
3079 Counters* counters = masm()->isolate()->counters();
3080 __ IncrementCounter(counters->keyed_store_field(), 1, r3, r4);
3081
3082 // Check that the name has not changed.
3083 __ cmp(r1, Operand(name));
3084 __ b(ne, &miss);
3085
3086 // r3 is used as scratch register. r1 and r2 keep their values if a jump to
3087 // the miss label is generated.
3088 GenerateStoreField(masm(), object, index, transition, r2, r1, r3, &miss);
3089 __ bind(&miss);
3090
3091 __ DecrementCounter(counters->keyed_store_field(), 1, r3, r4);
3092 Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
3093 __ Jump(ic, RelocInfo::CODE_TARGET);
3094
3095 // Return the generated code.
3096 return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
3097 }
3098
3099
Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
    Handle<Map> receiver_map) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : key
  //  -- r2    : receiver
  //  -- lr    : return address
  //  -- r3    : scratch
  // -----------------------------------
  ElementsKind elements_kind = receiver_map->elements_kind();
  bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
  Handle<Code> stub =
      KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();

  __ DispatchMap(r2, r3, receiver_map, stub, DO_SMI_CHECK);

  Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
  __ Jump(ic, RelocInfo::CODE_TARGET);

  // Return the generated code.
  return GetCode(NORMAL, factory()->empty_string());
}


Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
    MapHandleList* receiver_maps,
    CodeHandleList* handler_stubs,
    MapHandleList* transitioned_maps) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : key
  //  -- r2    : receiver
  //  -- lr    : return address
  //  -- r3    : scratch
  // -----------------------------------
  Label miss;
  __ JumpIfSmi(r2, &miss);

  int receiver_count = receiver_maps->length();
  __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
  for (int i = 0; i < receiver_count; ++i) {
    __ mov(ip, Operand(receiver_maps->at(i)));
    __ cmp(r3, ip);
    if (transitioned_maps->at(i).is_null()) {
      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
    } else {
      Label next_map;
      __ b(ne, &next_map);
      __ mov(r3, Operand(transitioned_maps->at(i)));
      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
      __ bind(&next_map);
    }
  }
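  // Note on the transitioning case above: when a transition map is present,
  // the handler stub performs the elements-kind transition itself and, by
  // convention here, receives the target map in r3 (the scratch register
  // listed in the state comment).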

  __ bind(&miss);
  Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
  __ Jump(miss_ic, RelocInfo::CODE_TARGET, al);

  // Return the generated code.
  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
}


Handle<Code> ConstructStubCompiler::CompileConstructStub(
    Handle<JSFunction> function) {
  // ----------- S t a t e -------------
  //  -- r0    : argc
  //  -- r1    : constructor
  //  -- lr    : return address
  //  -- [sp]  : last argument
  // -----------------------------------
  Label generic_stub_call;

  // Use r7 for holding undefined, which is used in several places below.
  __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);

#ifdef ENABLE_DEBUGGER_SUPPORT
  // Check to see whether there are any break points in the function code. If
  // there are, jump to the generic constructor stub, which calls the actual
  // code for the function, thereby hitting the break points.
  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
  __ cmp(r2, r7);
  __ b(ne, &generic_stub_call);
#endif

  // Load the initial map and verify that it is in fact a map.
  // r1: constructor function
  // r7: undefined
  __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
  __ JumpIfSmi(r2, &generic_stub_call);
  __ CompareObjectType(r2, r3, r4, MAP_TYPE);
  __ b(ne, &generic_stub_call);

#ifdef DEBUG
  // Cannot construct functions this way.
  // r0: argc
  // r1: constructor function
  // r2: initial map
  // r7: undefined
  __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
  __ Check(ne, "Function constructed by construct stub.");
#endif

  // Now allocate the JSObject in new space.
  // r0: argc
  // r1: constructor function
  // r2: initial map
  // r7: undefined
  __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
  __ AllocateInNewSpace(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS);

  // Allocated the JSObject; now initialize the fields. The map is set to the
  // initial map, and the properties and elements fields are set to the empty
  // fixed array.
  // r0: argc
  // r1: constructor function
  // r2: initial map
  // r3: object size (in words)
  // r4: JSObject (not tagged)
  // r7: undefined
  __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
  __ mov(r5, r4);
  ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
  __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
  ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
  __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
  ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
  __ str(r6, MemOperand(r5, kPointerSize, PostIndex));

  // Calculate the location of the first argument. The stack contains only the
  // argc arguments.
  __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
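  // For illustration: with argc == 2 the stack holds
  //   sp + 4 : argument 0
  //   sp + 0 : argument 1 (the last argument),
  // so r1 = sp + 2 * kPointerSize points just past argument 0 and argument n
  // (0-based) is found at r1 - (n + 1) * kPointerSize, matching the load in
  // the loop below.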

  // Fill all the in-object properties with undefined.
  // r0: argc
  // r1: first argument
  // r3: object size (in words)
  // r4: JSObject (not tagged)
  // r5: First in-object property of JSObject (not tagged)
  // r7: undefined
  // Fill the initialized properties with a constant value or a passed argument
  // depending on the this.x = ...; assignment in the function.
  Handle<SharedFunctionInfo> shared(function->shared());
  for (int i = 0; i < shared->this_property_assignments_count(); i++) {
    if (shared->IsThisPropertyAssignmentArgument(i)) {
      Label not_passed, next;
      // Check if the argument assigned to the property is actually passed.
      int arg_number = shared->GetThisPropertyAssignmentArgument(i);
      __ cmp(r0, Operand(arg_number));
      __ b(le, &not_passed);
      // Argument passed - find it on the stack.
      __ ldr(r2, MemOperand(r1, (arg_number + 1) * -kPointerSize));
      __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
      __ b(&next);
      __ bind(&not_passed);
      // Set the property to undefined.
      __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
      __ bind(&next);
    } else {
      // Set the property to the constant value.
      Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
      __ mov(r2, Operand(constant));
      __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
    }
  }

  // Fill the unused in-object property fields with undefined.
  ASSERT(function->has_initial_map());
  for (int i = shared->this_property_assignments_count();
       i < function->initial_map()->inobject_properties();
       i++) {
    __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
  }

  // r0: argc
  // r4: JSObject (not tagged)
  // Move argc to r1 and the JSObject to return to r0 and tag it.
  __ mov(r1, r0);
  __ mov(r0, r4);
  __ orr(r0, r0, Operand(kHeapObjectTag));

  // r0: JSObject
  // r1: argc
  // Remove caller arguments and receiver from the stack and return.
  __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2));
  __ add(sp, sp, Operand(kPointerSize));
  Counters* counters = masm()->isolate()->counters();
  __ IncrementCounter(counters->constructed_objects(), 1, r1, r2);
  __ IncrementCounter(counters->constructed_objects_stub(), 1, r1, r2);
  __ Jump(lr);

  // Jump to the generic stub in case the specialized code cannot handle the
  // construction.
  __ bind(&generic_stub_call);
  Handle<Code> code = masm()->isolate()->builtins()->JSConstructStubGeneric();
  __ Jump(code, RelocInfo::CODE_TARGET);

  // Return the generated code.
  return GetCode();
}


#undef __
#define __ ACCESS_MASM(masm)


void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
    MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr    : return address
  //  -- r0    : key
  //  -- r1    : receiver
  // -----------------------------------
  Label slow, miss_force_generic;

  Register key = r0;
  Register receiver = r1;

  __ JumpIfNotSmi(key, &miss_force_generic);
  __ mov(r2, Operand(key, ASR, kSmiTagSize));
  __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5);
  __ Ret();

  __ bind(&slow);
  __ IncrementCounter(
      masm->isolate()->counters()->keyed_load_external_array_slow(),
      1, r2, r3);

  // ---------- S t a t e --------------
  //  -- lr    : return address
  //  -- r0    : key
  //  -- r1    : receiver
  // -----------------------------------
  Handle<Code> slow_ic =
      masm->isolate()->builtins()->KeyedLoadIC_Slow();
  __ Jump(slow_ic, RelocInfo::CODE_TARGET);

  // Miss case, call the runtime.
  __ bind(&miss_force_generic);

  // ---------- S t a t e --------------
  //  -- lr    : return address
  //  -- r0    : key
  //  -- r1    : receiver
  // -----------------------------------

  Handle<Code> miss_ic =
      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
}


static bool IsElementTypeSigned(ElementsKind elements_kind) {
  switch (elements_kind) {
    case EXTERNAL_BYTE_ELEMENTS:
    case EXTERNAL_SHORT_ELEMENTS:
    case EXTERNAL_INT_ELEMENTS:
      return true;

    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
    case EXTERNAL_PIXEL_ELEMENTS:
      return false;

    case EXTERNAL_FLOAT_ELEMENTS:
    case EXTERNAL_DOUBLE_ELEMENTS:
    case FAST_ELEMENTS:
    case FAST_SMI_ONLY_ELEMENTS:
    case FAST_DOUBLE_ELEMENTS:
    case DICTIONARY_ELEMENTS:
    case NON_STRICT_ARGUMENTS_ELEMENTS:
      UNREACHABLE();
      return false;
  }
  return false;
}


void KeyedLoadStubCompiler::GenerateLoadExternalArray(
    MacroAssembler* masm,
    ElementsKind elements_kind) {
  // ---------- S t a t e --------------
  //  -- lr    : return address
  //  -- r0    : key
  //  -- r1    : receiver
  // -----------------------------------
  Label miss_force_generic, slow, failed_allocation;

  Register key = r0;
  Register receiver = r1;

  // This stub is meant to be tail-jumped to, so the receiver must already
  // have been verified by the caller not to be a smi.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &miss_force_generic);

  __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // r3: elements array

  // Check that the index is in range.
  __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
  __ cmp(key, ip);
  // Unsigned comparison catches both negative and too-large values.
  __ b(hs, &miss_force_generic);

  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
  // r3: base pointer of external storage

  // We do not untag the smi key; instead we work with it as if it had been
  // premultiplied by 2.
  STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));

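  // Addressing note (for illustration): a smi key is the element index
  // shifted left by 1, so byte-sized loads use MemOperand(r3, key, LSR, 1)
  // to address r3 + index, 16-bit loads use (key, LSL, 0) for r3 + 2 * index,
  // 32-bit and float loads use (key, LSL, 1) for r3 + 4 * index, and doubles
  // use (key, LSL, 2) for r3 + 8 * index.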
  Register value = r2;
  switch (elements_kind) {
    case EXTERNAL_BYTE_ELEMENTS:
      __ ldrsb(value, MemOperand(r3, key, LSR, 1));
      break;
    case EXTERNAL_PIXEL_ELEMENTS:
    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
      __ ldrb(value, MemOperand(r3, key, LSR, 1));
      break;
    case EXTERNAL_SHORT_ELEMENTS:
      __ ldrsh(value, MemOperand(r3, key, LSL, 0));
      break;
    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
      __ ldrh(value, MemOperand(r3, key, LSL, 0));
      break;
    case EXTERNAL_INT_ELEMENTS:
    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
      __ ldr(value, MemOperand(r3, key, LSL, 1));
      break;
    case EXTERNAL_FLOAT_ELEMENTS:
      if (CpuFeatures::IsSupported(VFP3)) {
        CpuFeatures::Scope scope(VFP3);
        __ add(r2, r3, Operand(key, LSL, 1));
        __ vldr(s0, r2, 0);
      } else {
        __ ldr(value, MemOperand(r3, key, LSL, 1));
      }
      break;
    case EXTERNAL_DOUBLE_ELEMENTS:
      if (CpuFeatures::IsSupported(VFP3)) {
        CpuFeatures::Scope scope(VFP3);
        __ add(r2, r3, Operand(key, LSL, 2));
        __ vldr(d0, r2, 0);
      } else {
        __ add(r4, r3, Operand(key, LSL, 2));
        // r4: pointer to the beginning of the double we want to load.
        __ ldr(r2, MemOperand(r4, 0));
        __ ldr(r3, MemOperand(r4, Register::kSizeInBytes));
      }
      break;
    case FAST_ELEMENTS:
    case FAST_SMI_ONLY_ELEMENTS:
    case FAST_DOUBLE_ELEMENTS:
    case DICTIONARY_ELEMENTS:
    case NON_STRICT_ARGUMENTS_ELEMENTS:
      UNREACHABLE();
      break;
  }

3463
3464 // For integer array types:
3465 // r2: value
3466 // For float array type:
3467 // s0: value (if VFP3 is supported)
3468 // r2: value (if VFP3 is not supported)
3469 // For double array type:
3470 // d0: value (if VFP3 is supported)
3471 // r2/r3: value (if VFP3 is not supported)
3472
3473 if (elements_kind == EXTERNAL_INT_ELEMENTS) {
3474 // For the Int and UnsignedInt array types, we need to see whether
3475 // the value can be represented in a Smi. If not, we need to convert
3476 // it to a HeapNumber.
3477 Label box_int;
3478 __ cmp(value, Operand(0xC0000000));
3479 __ b(mi, &box_int);
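    // Why this works (for illustration): a 32-bit value fits in a smi iff
    // bits 31 and 30 are equal. Subtracting 0xC0000000 leaves the N flag
    // clear exactly for values in [-2^30, 2^30), so e.g. 5 and -7 stay on
    // the fast path below while 0x40000000 (2^30) gets boxed.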
    // Tag integer as smi and return it.
    __ mov(r0, Operand(value, LSL, kSmiTagSize));
    __ Ret();

    __ bind(&box_int);
    // Allocate a HeapNumber for the result and perform int-to-double
    // conversion. Don't touch r0 or r1 as they are needed if allocation
    // fails.
    __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
    // Now we can use r0 for the result as key is not needed any more.
    __ mov(r0, r5);

    if (CpuFeatures::IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);
      __ vmov(s0, value);
      __ vcvt_f64_s32(d0, s0);
      __ sub(r3, r0, Operand(kHeapObjectTag));
      __ vstr(d0, r3, HeapNumber::kValueOffset);
      __ Ret();
    } else {
      Register dst1 = r1;
      Register dst2 = r3;
      FloatingPointHelper::Destination dest =
          FloatingPointHelper::kCoreRegisters;
      FloatingPointHelper::ConvertIntToDouble(masm,
                                              value,
                                              dest,
                                              d0,
                                              dst1,
                                              dst2,
                                              r9,
                                              s0);
      __ str(dst1, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
      __ str(dst2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
      __ Ret();
    }
  } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
    // The test is different for unsigned int values. Since we need
    // the value to be in the range of a positive smi, we can't
    // handle either of the top two bits being set in the value.
    if (CpuFeatures::IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);
      Label box_int, done;
      __ tst(value, Operand(0xC0000000));
      __ b(ne, &box_int);
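      // For illustration: an unsigned value fits in a positive smi only if
      // it is below 2^30, i.e. both of the bits tested above are clear.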
      // Tag integer as smi and return it.
      __ mov(r0, Operand(value, LSL, kSmiTagSize));
      __ Ret();

      __ bind(&box_int);
      __ vmov(s0, value);
      // Allocate a HeapNumber for the result and perform int-to-double
      // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
      // registers - also when jumping due to exhausted young space.
      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);

      __ vcvt_f64_u32(d0, s0);
      __ sub(r1, r2, Operand(kHeapObjectTag));
      __ vstr(d0, r1, HeapNumber::kValueOffset);

      __ mov(r0, r2);
      __ Ret();
    } else {
      // Check whether unsigned integer fits into smi.
      Label box_int_0, box_int_1, done;
      __ tst(value, Operand(0x80000000));
      __ b(ne, &box_int_0);
      __ tst(value, Operand(0x40000000));
      __ b(ne, &box_int_1);
      // Tag integer as smi and return it.
      __ mov(r0, Operand(value, LSL, kSmiTagSize));
      __ Ret();

      Register hiword = value;  // r2.
      Register loword = r3;
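      // Worked example (illustrative, per the helper's use below):
      // GenerateUInt2Double packs the value into hiword:loword as an
      // IEEE-754 double, e.g. 0x80000000 (2^31, no leading zeros) should
      // become hiword 0x41E00000 (biased exponent 1023 + 31 = 1054) and
      // loword 0.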

      __ bind(&box_int_0);
      // Integer does not have leading zeros.
      GenerateUInt2Double(masm, hiword, loword, r4, 0);
      __ b(&done);

      __ bind(&box_int_1);
      // Integer has one leading zero.
      GenerateUInt2Double(masm, hiword, loword, r4, 1);

      __ bind(&done);
      // Integer was converted to double in registers hiword:loword.
      // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
      // clobbers all registers - also when jumping due to exhausted young
      // space.
      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
      __ AllocateHeapNumber(r4, r5, r7, r6, &slow);

      __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
      __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));

      __ mov(r0, r4);
      __ Ret();
    }
  } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
    // For the floating-point array type, we need to always allocate a
    // HeapNumber.
    if (CpuFeatures::IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);
      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
      // AllocateHeapNumber clobbers all registers - also when jumping due to
      // exhausted young space.
      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
      __ vcvt_f64_f32(d0, s0);
      __ sub(r1, r2, Operand(kHeapObjectTag));
      __ vstr(d0, r1, HeapNumber::kValueOffset);

      __ mov(r0, r2);
      __ Ret();
    } else {
      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
      // AllocateHeapNumber clobbers all registers - also when jumping due to
      // exhausted young space.
      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
      __ AllocateHeapNumber(r3, r4, r5, r6, &slow);
      // VFP is not available, so do a manual single-to-double conversion.

      // r2: floating point value (binary32)
      // r3: heap number for result

      // Extract the mantissa to r0. OK to clobber r0 now as there are no
      // jumps to the slow case from here.
      __ and_(r0, value, Operand(kBinary32MantissaMask));

      // Extract the exponent to r1. OK to clobber r1 now as there are no
      // jumps to the slow case from here.
      __ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
      __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));

      Label exponent_rebiased;
      __ teq(r1, Operand(0x00));
      __ b(eq, &exponent_rebiased);

      __ teq(r1, Operand(0xff));
      __ mov(r1, Operand(0x7ff), LeaveCC, eq);
      __ b(eq, &exponent_rebiased);

      // Rebias the exponent.
      __ add(r1,
             r1,
             Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
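
      // Worked example (for illustration): for 1.5f the binary32 exponent
      // field is 127; adding -127 + 1023 = 896 rebiases it to the binary64
      // field 1023. The special encodings 0x00 (zero/subnormal) and 0xff
      // (infinity/NaN, mapped to 0x7ff above) bypass the rebias. The 23-bit
      // mantissa is then split below: its top 20 bits go into the high word
      // (shift right by 3) and the remaining 3 bits into the top of the low
      // word (shift left by 29).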

      __ bind(&exponent_rebiased);
      __ and_(r2, value, Operand(kBinary32SignMask));
      value = no_reg;
      __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));

      // Shift the mantissa.
      static const int kMantissaShiftForHiWord =
          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;

      static const int kMantissaShiftForLoWord =
          kBitsPerInt - kMantissaShiftForHiWord;

      __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
      __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));

      __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
      __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));

      __ mov(r0, r3);
      __ Ret();
    }
  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    if (CpuFeatures::IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);
      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
      // AllocateHeapNumber clobbers all registers - also when jumping due to
      // exhausted young space.
      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
      __ sub(r1, r2, Operand(kHeapObjectTag));
      __ vstr(d0, r1, HeapNumber::kValueOffset);

      __ mov(r0, r2);
      __ Ret();
    } else {
      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
      // AllocateHeapNumber clobbers all registers - also when jumping due to
      // exhausted young space.
      __ LoadRoot(r7, Heap::kHeapNumberMapRootIndex);
      __ AllocateHeapNumber(r4, r5, r6, r7, &slow);

      __ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
      __ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset));
      __ mov(r0, r4);
      __ Ret();
    }

  } else {
    // Tag integer as smi and return it.
    __ mov(r0, Operand(value, LSL, kSmiTagSize));
    __ Ret();
  }

  // Slow case, key and receiver still in r0 and r1.
  __ bind(&slow);
  __ IncrementCounter(
      masm->isolate()->counters()->keyed_load_external_array_slow(),
      1, r2, r3);

  // ---------- S t a t e --------------
  //  -- lr    : return address
  //  -- r0    : key
  //  -- r1    : receiver
  // -----------------------------------

  __ Push(r1, r0);

  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);

  __ bind(&miss_force_generic);
  Handle<Code> stub =
      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
  __ Jump(stub, RelocInfo::CODE_TARGET);
}


void KeyedStoreStubCompiler::GenerateStoreExternalArray(
    MacroAssembler* masm,
    ElementsKind elements_kind) {
  // ---------- S t a t e --------------
  //  -- r0    : value
  //  -- r1    : key
  //  -- r2    : receiver
  //  -- lr    : return address
  // -----------------------------------
  Label slow, check_heap_number, miss_force_generic;

  // Register usage.
  Register value = r0;
  Register key = r1;
  Register receiver = r2;
  // r3 mostly holds the elements array or the destination external array.

  // This stub is meant to be tail-jumped to, so the receiver must already
  // have been verified by the caller not to be a smi.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &miss_force_generic);

  __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check that the index is in range.
  __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
  __ cmp(key, ip);
  // Unsigned comparison catches both negative and too-large values.
  __ b(hs, &miss_force_generic);

  // Handle both smis and HeapNumbers in the fast path. Go to the
  // runtime for all other kinds of values.
  // r3: external array.
  if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
    // Double to pixel conversion is only implemented in the runtime for now.
    __ JumpIfNotSmi(value, &slow);
  } else {
    __ JumpIfNotSmi(value, &check_heap_number);
  }
  __ SmiUntag(r5, value);
  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));

  // r3: base pointer of external storage.
  // r5: value (integer).
  switch (elements_kind) {
    case EXTERNAL_PIXEL_ELEMENTS:
      // Clamp the value to [0..255].
      __ Usat(r5, 8, Operand(r5));
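      // For illustration: Usat saturates the signed input to the unsigned
      // 8-bit range, so e.g. 300 becomes 255 and -5 becomes 0.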
      __ strb(r5, MemOperand(r3, key, LSR, 1));
      break;
    case EXTERNAL_BYTE_ELEMENTS:
    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
      __ strb(r5, MemOperand(r3, key, LSR, 1));
      break;
    case EXTERNAL_SHORT_ELEMENTS:
    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
      __ strh(r5, MemOperand(r3, key, LSL, 0));
      break;
    case EXTERNAL_INT_ELEMENTS:
    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
      __ str(r5, MemOperand(r3, key, LSL, 1));
      break;
    case EXTERNAL_FLOAT_ELEMENTS:
      // Perform int-to-float conversion and store to memory.
      __ SmiUntag(r4, key);
      StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9);
      break;
    case EXTERNAL_DOUBLE_ELEMENTS:
      __ add(r3, r3, Operand(key, LSL, 2));
      // r3: effective address of the double element
      FloatingPointHelper::Destination destination;
      if (CpuFeatures::IsSupported(VFP3)) {
        destination = FloatingPointHelper::kVFPRegisters;
      } else {
        destination = FloatingPointHelper::kCoreRegisters;
      }
      FloatingPointHelper::ConvertIntToDouble(
          masm, r5, destination,
          d0, r6, r7,  // These are: double_dst, dst1, dst2.
          r4, s2);     // These are: scratch2, single_scratch.
      if (destination == FloatingPointHelper::kVFPRegisters) {
        CpuFeatures::Scope scope(VFP3);
        __ vstr(d0, r3, 0);
      } else {
        __ str(r6, MemOperand(r3, 0));
        __ str(r7, MemOperand(r3, Register::kSizeInBytes));
      }
      break;
    case FAST_ELEMENTS:
    case FAST_SMI_ONLY_ELEMENTS:
    case FAST_DOUBLE_ELEMENTS:
    case DICTIONARY_ELEMENTS:
    case NON_STRICT_ARGUMENTS_ELEMENTS:
      UNREACHABLE();
      break;
  }

  // Entry registers are intact, r0 holds the value which is the return value.
  __ Ret();

  if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
    // r3: external array.
    __ bind(&check_heap_number);
    __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
    __ b(ne, &slow);

    __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));

    // r3: base pointer of external storage.

    // The WebGL specification leaves the behavior of storing NaN and
    // +/-Infinity into integer arrays basically undefined. For more
    // reproducible behavior, convert these to zero.
    if (CpuFeatures::IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);

      if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
        // vldr requires the offset to be a multiple of 4, so we cannot
        // include -kHeapObjectTag in it.
        __ sub(r5, r0, Operand(kHeapObjectTag));
        __ vldr(d0, r5, HeapNumber::kValueOffset);
        __ add(r5, r3, Operand(key, LSL, 1));
        __ vcvt_f32_f64(s0, d0);
        __ vstr(s0, r5, 0);
      } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
        __ sub(r5, r0, Operand(kHeapObjectTag));
        __ vldr(d0, r5, HeapNumber::kValueOffset);
        __ add(r5, r3, Operand(key, LSL, 2));
        __ vstr(d0, r5, 0);
      } else {
        // Hoisted load. vldr requires the offset to be a multiple of 4, so
        // we cannot include -kHeapObjectTag in it.
        __ sub(r5, value, Operand(kHeapObjectTag));
        __ vldr(d0, r5, HeapNumber::kValueOffset);
        __ EmitECMATruncate(r5, d0, s2, r6, r7, r9);
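
        // Note: EmitECMATruncate implements the ECMA-262 ToInt32 truncation
        // (modulo 2^32), which also maps NaN and +/-Infinity to 0 as the
        // comment above requires.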

        switch (elements_kind) {
          case EXTERNAL_BYTE_ELEMENTS:
          case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
            __ strb(r5, MemOperand(r3, key, LSR, 1));
            break;
          case EXTERNAL_SHORT_ELEMENTS:
          case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
            __ strh(r5, MemOperand(r3, key, LSL, 0));
            break;
          case EXTERNAL_INT_ELEMENTS:
          case EXTERNAL_UNSIGNED_INT_ELEMENTS:
            __ str(r5, MemOperand(r3, key, LSL, 1));
            break;
          case EXTERNAL_PIXEL_ELEMENTS:
          case EXTERNAL_FLOAT_ELEMENTS:
          case EXTERNAL_DOUBLE_ELEMENTS:
          case FAST_ELEMENTS:
          case FAST_SMI_ONLY_ELEMENTS:
          case FAST_DOUBLE_ELEMENTS:
          case DICTIONARY_ELEMENTS:
          case NON_STRICT_ARGUMENTS_ELEMENTS:
            UNREACHABLE();
            break;
        }
      }

      // Entry registers are intact, r0 holds the value which is the return
      // value.
      __ Ret();
    } else {
      // VFP3 is not available, do manual conversions.
      __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
      __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));

      if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
        Label done, nan_or_infinity_or_zero;
        static const int kMantissaInHiWordShift =
            kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;

        static const int kMantissaInLoWordShift =
            kBitsPerInt - kMantissaInHiWordShift;

        // Test for all special exponent values: zeros, subnormal numbers,
        // NaNs and infinities. All these should be converted to 0.
        __ mov(r7, Operand(HeapNumber::kExponentMask));
        __ and_(r9, r5, Operand(r7), SetCC);
        __ b(eq, &nan_or_infinity_or_zero);

        __ teq(r9, Operand(r7));
        __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
        __ b(eq, &nan_or_infinity_or_zero);

        // Rebias the exponent.
        __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
        __ add(r9,
               r9,
               Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
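
        // Worked example (for illustration): for the double 1.5 the biased
        // exponent field is 1023; adding 127 - 1023 = -896 rebiases it to
        // the binary32 field 127. The range checks below then clamp
        // overflows to signed infinity and underflows to signed zero.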

        __ cmp(r9, Operand(kBinary32MaxExponent));
        __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
        __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
        __ b(gt, &done);

        __ cmp(r9, Operand(kBinary32MinExponent));
        __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
        __ b(lt, &done);

        __ and_(r7, r5, Operand(HeapNumber::kSignMask));
        __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
        __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
        __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
        __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));

        __ bind(&done);
        __ str(r5, MemOperand(r3, key, LSL, 1));
        // Entry registers are intact, r0 holds the value which is the return
        // value.
        __ Ret();

        __ bind(&nan_or_infinity_or_zero);
        __ and_(r7, r5, Operand(HeapNumber::kSignMask));
        __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
        __ orr(r9, r9, r7);
        __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
        __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
        __ b(&done);
      } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
        __ add(r7, r3, Operand(key, LSL, 2));
        // r7: effective address of the destination element.
        __ str(r6, MemOperand(r7, 0));
        __ str(r5, MemOperand(r7, Register::kSizeInBytes));
        __ Ret();
      } else {
        bool is_signed_type = IsElementTypeSigned(elements_kind);
        int meaningful_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
        int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
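
        // For illustration: signed element types keep 31 magnitude bits (the
        // sign is applied separately at the end), unsigned types keep all
        // 32; a too-large exponent saturates to min_value (INT32_MIN for
        // signed types, 0 for unsigned ones), per the comments below.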

        Label done, sign;

        // Test for all special exponent values: zeros, subnormal numbers,
        // NaNs and infinities. All these should be converted to 0.
        __ mov(r7, Operand(HeapNumber::kExponentMask));
        __ and_(r9, r5, Operand(r7), SetCC);
        __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
        __ b(eq, &done);

        __ teq(r9, Operand(r7));
        __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
        __ b(eq, &done);

        // Unbias the exponent.
        __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
        __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
        // If the exponent is negative then the result is 0.
        __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
        __ b(mi, &done);

        // If the exponent is too big then the result is the minimal value.
        __ cmp(r9, Operand(meaningful_bits - 1));
        __ mov(r5, Operand(min_value), LeaveCC, ge);
        __ b(ge, &done);

        __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
        __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
        __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));

        __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
        __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
        __ b(pl, &sign);

        __ rsb(r9, r9, Operand(0, RelocInfo::NONE));
        __ mov(r5, Operand(r5, LSL, r9));
        __ rsb(r9, r9, Operand(meaningful_bits));
        __ orr(r5, r5, Operand(r6, LSR, r9));

        __ bind(&sign);
        __ teq(r7, Operand(0, RelocInfo::NONE));
        __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);

        __ bind(&done);
        switch (elements_kind) {
          case EXTERNAL_BYTE_ELEMENTS:
          case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
            __ strb(r5, MemOperand(r3, key, LSR, 1));
            break;
          case EXTERNAL_SHORT_ELEMENTS:
          case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
            __ strh(r5, MemOperand(r3, key, LSL, 0));
            break;
          case EXTERNAL_INT_ELEMENTS:
          case EXTERNAL_UNSIGNED_INT_ELEMENTS:
            __ str(r5, MemOperand(r3, key, LSL, 1));
            break;
          case EXTERNAL_PIXEL_ELEMENTS:
          case EXTERNAL_FLOAT_ELEMENTS:
          case EXTERNAL_DOUBLE_ELEMENTS:
          case FAST_ELEMENTS:
          case FAST_SMI_ONLY_ELEMENTS:
          case FAST_DOUBLE_ELEMENTS:
          case DICTIONARY_ELEMENTS:
          case NON_STRICT_ARGUMENTS_ELEMENTS:
            UNREACHABLE();
            break;
        }
      }
    }
  }

  // Slow case, value, key and receiver still in r0, r1 and r2.
  __ bind(&slow);
  __ IncrementCounter(
      masm->isolate()->counters()->keyed_store_external_array_slow(),
      1, r3, r4);

  // ---------- S t a t e --------------
  //  -- r0    : value
  //  -- r1    : key
  //  -- r2    : receiver
  //  -- lr    : return address
  // -----------------------------------
  Handle<Code> slow_ic =
      masm->isolate()->builtins()->KeyedStoreIC_Slow();
  __ Jump(slow_ic, RelocInfo::CODE_TARGET);

  // Miss case, call the runtime.
  __ bind(&miss_force_generic);

  // ---------- S t a t e --------------
  //  -- r0    : value
  //  -- r1    : key
  //  -- r2    : receiver
  //  -- lr    : return address
  // -----------------------------------

  Handle<Code> miss_ic =
      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
}


void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- lr    : return address
  //  -- r0    : key
  //  -- r1    : receiver
  // -----------------------------------
  Label miss_force_generic;

  // This stub is meant to be tail-jumped to, so the receiver must already
  // have been verified by the caller not to be a smi.

  // Check that the key is a smi.
  __ JumpIfNotSmi(r0, &miss_force_generic);

  // Get the elements array.
  __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
  __ AssertFastElements(r2);

  // Check that the key is within bounds.
  __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
  __ cmp(r0, Operand(r3));
  __ b(hs, &miss_force_generic);

  // Load the result and make sure it's not the hole.
  __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
  __ ldr(r4,
         MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ cmp(r4, ip);
  __ b(eq, &miss_force_generic);
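  // Note on the hole check above: fast elements backing stores mark missing
  // entries with the one-off hole sentinel, and a holey access may need a
  // prototype chain lookup, which only the generic IC performs.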
  __ mov(r0, r4);
  __ Ret();

  __ bind(&miss_force_generic);
  Handle<Code> stub =
      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
  __ Jump(stub, RelocInfo::CODE_TARGET);
}


void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
    MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- lr    : return address
  //  -- r0    : key
  //  -- r1    : receiver
  // -----------------------------------
  Label miss_force_generic, slow_allocate_heapnumber;

  Register key_reg = r0;
  Register receiver_reg = r1;
  Register elements_reg = r2;
  Register heap_number_reg = r2;
  Register indexed_double_offset = r3;
  Register scratch = r4;
  Register scratch2 = r5;
  Register scratch3 = r6;
  Register heap_number_map = r7;

  // This stub is meant to be tail-jumped to, so the receiver must already
  // have been verified by the caller not to be a smi.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key_reg, &miss_force_generic);

  // Get the elements array.
  __ ldr(elements_reg,
         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));

  // Check that the key is within bounds.
  __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
  __ cmp(key_reg, Operand(scratch));
  __ b(hs, &miss_force_generic);

  // Load the upper word of the double in the fixed array and test for NaN.
  __ add(indexed_double_offset, elements_reg,
         Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
  uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
  __ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
  __ cmp(scratch, Operand(kHoleNanUpper32));
  __ b(&miss_force_generic, eq);
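  // Note on the test above: the hole in a FixedDoubleArray is a dedicated
  // NaN bit pattern, and doubles written into the array are canonicalized,
  // so no ordinary element shares the hole's upper 32 bits; comparing one
  // word is enough.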

  // Non-NaN. Allocate a new heap number and copy the double value into it.
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
                        heap_number_map, &slow_allocate_heapnumber);

  // No need to reload the upper 32 bits of the double; they are already in
  // scratch.
  __ str(scratch, FieldMemOperand(heap_number_reg,
                                  HeapNumber::kExponentOffset));
  __ ldr(scratch, FieldMemOperand(indexed_double_offset,
                                  FixedArray::kHeaderSize));
  __ str(scratch, FieldMemOperand(heap_number_reg,
                                  HeapNumber::kMantissaOffset));

  __ mov(r0, heap_number_reg);
  __ Ret();

  __ bind(&slow_allocate_heapnumber);
  Handle<Code> slow_ic =
      masm->isolate()->builtins()->KeyedLoadIC_Slow();
  __ Jump(slow_ic, RelocInfo::CODE_TARGET);

  __ bind(&miss_force_generic);
  Handle<Code> miss_ic =
      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
}


void KeyedStoreStubCompiler::GenerateStoreFastElement(
    MacroAssembler* masm,
    bool is_js_array,
    ElementsKind elements_kind,
    KeyedAccessGrowMode grow_mode) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : key
  //  -- r2    : receiver
  //  -- lr    : return address
  //  -- r3    : scratch
  //  -- r4    : scratch (elements)
  // -----------------------------------
  Label miss_force_generic, transition_elements_kind, grow, slow;
  Label finish_store, check_capacity;

  Register value_reg = r0;
  Register key_reg = r1;
  Register receiver_reg = r2;
  Register scratch = r4;
  Register elements_reg = r3;
  Register length_reg = r5;
  Register scratch2 = r6;

  // This stub is meant to be tail-jumped to, so the receiver must already
  // have been verified by the caller not to be a smi.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key_reg, &miss_force_generic);

  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
    __ JumpIfNotSmi(value_reg, &transition_elements_kind);
  }

  // Check that the key is within bounds.
  __ ldr(elements_reg,
         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
  if (is_js_array) {
    __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
  } else {
    __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
  }
  // Compare smis.
  __ cmp(key_reg, scratch);
  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
    __ b(hs, &grow);
  } else {
    __ b(hs, &miss_force_generic);
  }

  // Make sure elements is a fast element array, not 'cow'.
  __ CheckMap(elements_reg,
              scratch,
              Heap::kFixedArrayMapRootIndex,
              &miss_force_generic,
              DONT_DO_SMI_CHECK);
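  // Note on the map check above: copy-on-write ("cow") arrays use a distinct
  // map and share their backing store, so writes must first copy the
  // elements; this stub does not do that and instead misses to the generic
  // code.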

  __ bind(&finish_store);
  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
    __ add(scratch,
           elements_reg,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
    __ add(scratch,
           scratch,
           Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
    __ str(value_reg, MemOperand(scratch));
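    // No write barrier is needed here: in a FAST_SMI_ONLY_ELEMENTS array the
    // stored value is known to be a smi, never a heap pointer.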
  } else {
    ASSERT(elements_kind == FAST_ELEMENTS);
    __ add(scratch,
           elements_reg,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
    __ add(scratch,
           scratch,
           Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
    __ str(value_reg, MemOperand(scratch));
    __ mov(receiver_reg, value_reg);
    __ RecordWrite(elements_reg,  // Object.
                   scratch,       // Address.
                   receiver_reg,  // Value.
                   kLRHasNotBeenSaved,
                   kDontSaveFPRegs);
  }
  // value_reg (r0) is preserved.
  // Done.
  __ Ret();

  __ bind(&miss_force_generic);
  Handle<Code> ic =
      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
  __ Jump(ic, RelocInfo::CODE_TARGET);

  __ bind(&transition_elements_kind);
  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
  __ Jump(ic_miss, RelocInfo::CODE_TARGET);

  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
    // Grow the array by a single element if possible.
    __ bind(&grow);

    // Make sure the array is only growing by a single element; anything else
    // must be handled by the runtime. Flags already set by previous compare.
    __ b(ne, &miss_force_generic);
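
    // In other words: the bounds compare above left the flags set, so
    // reaching here with "eq" means key == length, i.e. an append of exactly
    // one element (storing to index 4 of a four-element array grows it;
    // index 6 goes to the runtime).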

    // Check for the empty array, and preallocate a small backing store if
    // possible.
    __ ldr(length_reg,
           FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
    __ ldr(elements_reg,
           FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
    __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
    __ b(ne, &check_capacity);

    int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
    __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow,
                          TAG_OBJECT);

    __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
    __ str(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
    __ mov(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
    __ str(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
    for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
      __ str(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
    }

    // Store the element at index zero.
    __ str(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));

    // Install the new backing store in the JSArray.
    __ str(elements_reg,
           FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
    __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
                        scratch, kLRHasNotBeenSaved, kDontSaveFPRegs,
                        EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

    // Increment the length of the array.
    __ mov(length_reg, Operand(Smi::FromInt(1)));
    __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
    __ Ret();

    __ bind(&check_capacity);
    // Check for cow elements; in general they are not handled by this stub.
    __ CheckMap(elements_reg,
                scratch,
                Heap::kFixedCOWArrayMapRootIndex,
                &miss_force_generic,
                DONT_DO_SMI_CHECK);

    __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
    __ cmp(length_reg, scratch);
    __ b(hs, &slow);

    // Grow the array and finish the store.
    __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
    __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
    __ jmp(&finish_store);

    __ bind(&slow);
    Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
    __ Jump(ic_slow, RelocInfo::CODE_TARGET);
  }
}


void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
    MacroAssembler* masm,
    bool is_js_array,
    KeyedAccessGrowMode grow_mode) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : key
  //  -- r2    : receiver
  //  -- lr    : return address
  //  -- r3    : scratch
  //  -- r4    : scratch
  //  -- r5    : scratch
  // -----------------------------------
  Label miss_force_generic, transition_elements_kind, grow, slow;
  Label finish_store, check_capacity;

  Register value_reg = r0;
  Register key_reg = r1;
  Register receiver_reg = r2;
  Register elements_reg = r3;
  Register scratch1 = r4;
  Register scratch2 = r5;
  Register scratch3 = r6;
  Register scratch4 = r7;
  Register length_reg = r7;

  // This stub is meant to be tail-jumped to, so the receiver must already
  // have been verified by the caller not to be a smi.
  __ JumpIfNotSmi(key_reg, &miss_force_generic);

  __ ldr(elements_reg,
         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));

  // Check that the key is within bounds.
  if (is_js_array) {
    __ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
  } else {
    __ ldr(scratch1,
           FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
  }
  // Compare smis; the unsigned comparison catches both negative and
  // out-of-bound indexes.
  __ cmp(key_reg, scratch1);
  if (grow_mode == ALLOW_JSARRAY_GROWTH) {
    __ b(hs, &grow);
  } else {
    __ b(hs, &miss_force_generic);
  }

  __ bind(&finish_store);
  __ StoreNumberToDoubleElements(value_reg,
                                 key_reg,
                                 receiver_reg,
                                 elements_reg,
                                 scratch1,
                                 scratch2,
                                 scratch3,
                                 scratch4,
                                 &transition_elements_kind);
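  // StoreNumberToDoubleElements stores smis and heap numbers as raw doubles
  // and branches to transition_elements_kind for anything else; the grow
  // path below performs the same value check explicitly.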
  __ Ret();

  // Handle store cache miss, replacing the ic with the generic stub.
  __ bind(&miss_force_generic);
  Handle<Code> ic =
      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
  __ Jump(ic, RelocInfo::CODE_TARGET);

  __ bind(&transition_elements_kind);
  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
  __ Jump(ic_miss, RelocInfo::CODE_TARGET);

  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
    // Grow the array by a single element if possible.
    __ bind(&grow);

    // Make sure the array is only growing by a single element; anything else
    // must be handled by the runtime. Flags already set by previous compare.
    __ b(ne, &miss_force_generic);

    // Transition on values that can't be stored in a FixedDoubleArray.
    Label value_is_smi;
    __ JumpIfSmi(value_reg, &value_is_smi);
    __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
    __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
    __ b(ne, &transition_elements_kind);
    __ bind(&value_is_smi);

    // Check for the empty array, and preallocate a small backing store if
    // possible.
    __ ldr(length_reg,
           FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
    __ ldr(elements_reg,
           FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
    __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
    __ b(ne, &check_capacity);

    int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
    __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
                          TAG_OBJECT);

    // Initialize the new FixedDoubleArray. Leave the elements uninitialized
    // for efficiency; they are guaranteed to be initialized before use.
    __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
    __ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
    __ mov(scratch1,
           Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
    __ str(scratch1,
           FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));

    // Install the new backing store in the JSArray.
    __ str(elements_reg,
           FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
    __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
                        scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs,
                        EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

    // Increment the length of the array.
    __ mov(length_reg, Operand(Smi::FromInt(1)));
    __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
    __ ldr(elements_reg,
           FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
    __ jmp(&finish_store);

    __ bind(&check_capacity);
    // Make sure that the backing store can hold additional elements.
    __ ldr(scratch1,
           FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
    __ cmp(length_reg, scratch1);
    __ b(hs, &slow);

    // Grow the array and finish the store.
    __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
    __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
    __ jmp(&finish_store);

    __ bind(&slow);
    Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
    __ Jump(ic_slow, RelocInfo::CODE_TARGET);
  }
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM