// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_MIPS)

#include "ic-inl.h"
#include "codegen.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

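// Probes one table (primary or secondary) of the stub cache. If the entry at
// the probed index matches 'name', the receiver's map, and the required
// 'flags', control jumps into the cached code object; otherwise execution
// falls through at the 'miss' label bound at the end.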
static void ProbeTable(Isolate* isolate,
                       MacroAssembler* masm,
                       Code::Flags flags,
                       StubCache::Table table,
                       Register receiver,
                       Register name,
                       // Number of the cache entry, not scaled.
                       Register offset,
                       Register scratch,
                       Register scratch2,
                       Register offset_scratch) {
  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));

  uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
  uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
  uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());

  // Check the relative positions of the address fields.
  ASSERT(value_off_addr > key_off_addr);
  ASSERT((value_off_addr - key_off_addr) % 4 == 0);
  ASSERT((value_off_addr - key_off_addr) < (256 * 4));
  ASSERT(map_off_addr > key_off_addr);
  ASSERT((map_off_addr - key_off_addr) % 4 == 0);
  ASSERT((map_off_addr - key_off_addr) < (256 * 4));

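  // Layout note (implied by the asserts above): each table entry is a
  // { key, value, map } triple of consecutive words, and the three external
  // references point at the fields of entry 0, so a field of entry i lives
  // at its entry-0 address plus i * sizeof(Entry).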
  Label miss;
  Register base_addr = scratch;
  scratch = no_reg;

  // Multiply by 3 because there are 3 fields per entry (name, code, map).
  __ sll(offset_scratch, offset, 1);
  __ Addu(offset_scratch, offset_scratch, offset);

  // Calculate the base address of the entry.
  __ li(base_addr, Operand(key_offset));
  __ sll(at, offset_scratch, kPointerSizeLog2);
  __ Addu(base_addr, base_addr, at);

  // Check that the key in the entry matches the name.
  __ lw(at, MemOperand(base_addr, 0));
  __ Branch(&miss, ne, name, Operand(at));

  // Check that the map in the entry matches the receiver's map.
  __ lw(at, MemOperand(base_addr, map_off_addr - key_off_addr));
  __ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Branch(&miss, ne, at, Operand(scratch2));

  // Get the code entry from the cache.
  Register code = scratch2;
  scratch2 = no_reg;
  __ lw(code, MemOperand(base_addr, value_off_addr - key_off_addr));

  // Check that the flags match what we're looking for.
  Register flags_reg = base_addr;
  base_addr = no_reg;
  __ lw(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
  __ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
  __ Branch(&miss, ne, flags_reg, Operand(flags));

#ifdef DEBUG
  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
    __ jmp(&miss);
  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
    __ jmp(&miss);
  }
#endif

  // Jump to the first instruction in the code stub.
  __ Addu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(at);

  // Miss: fall through.
  __ bind(&miss);
}


// Helper function used to check that the dictionary doesn't contain
// the property. This function may return false negatives, so miss_label
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
// Name must be a symbol and receiver must be a heap object.
static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
                                             Label* miss_label,
                                             Register receiver,
                                             Handle<String> name,
                                             Register scratch0,
                                             Register scratch1) {
  ASSERT(name->IsSymbol());
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
  __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);

  Label done;

  const int kInterceptorOrAccessCheckNeededMask =
      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);

  // Bail out if the receiver has a named interceptor or requires access checks.
  Register map = scratch1;
  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
  __ And(scratch0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
  __ Branch(miss_label, ne, scratch0, Operand(zero_reg));

  // Check that the receiver is a JSObject.
  __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ Branch(miss_label, lt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));

  // Load the properties array.
  Register properties = scratch0;
  __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  // Check that the properties array is a dictionary.
  __ lw(map, FieldMemOperand(properties, HeapObject::kMapOffset));
  Register tmp = properties;
  __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
  __ Branch(miss_label, ne, map, Operand(tmp));

  // Restore the temporarily used register.
  __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  StringDictionaryLookupStub::GenerateNegativeLookup(masm,
                                                     miss_label,
                                                     &done,
                                                     receiver,
                                                     properties,
                                                     name,
                                                     scratch1);
  __ bind(&done);
  __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}


void StubCache::GenerateProbe(MacroAssembler* masm,
                              Code::Flags flags,
                              Register receiver,
                              Register name,
                              Register scratch,
                              Register extra,
                              Register extra2,
                              Register extra3) {
  Isolate* isolate = masm->isolate();
  Label miss;

  // Make sure that the code is valid. The multiplying code relies on the
  // entry size being 12.
  ASSERT(sizeof(Entry) == 12);

  // Make sure that the flags do not name a specific type.
  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);

  // Make sure that there are no register conflicts.
  ASSERT(!scratch.is(receiver));
  ASSERT(!scratch.is(name));
  ASSERT(!extra.is(receiver));
  ASSERT(!extra.is(name));
  ASSERT(!extra.is(scratch));
  ASSERT(!extra2.is(receiver));
  ASSERT(!extra2.is(name));
  ASSERT(!extra2.is(scratch));
  ASSERT(!extra2.is(extra));

  // Check register validity.
  ASSERT(!scratch.is(no_reg));
  ASSERT(!extra.is(no_reg));
  ASSERT(!extra2.is(no_reg));
  ASSERT(!extra3.is(no_reg));

  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
                      extra2, extra3);

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &miss);

  // Get the map of the receiver and compute the hash.
  __ lw(scratch, FieldMemOperand(name, String::kHashFieldOffset));
  __ lw(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Addu(scratch, scratch, at);
  uint32_t mask = kPrimaryTableSize - 1;
  // We shift out the last two bits because they are not part of the hash and
  // they are always 01 for maps.
  __ srl(scratch, scratch, kHeapObjectTagSize);
  __ Xor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
  __ And(scratch, scratch, Operand(mask));

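  // Net effect of the three instructions above: scratch now holds the
  // primary-table index
  //   (((name.hash_field + receiver.map) >> kHeapObjectTagSize)
  //    ^ (flags >> kHeapObjectTagSize)) & (kPrimaryTableSize - 1).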
  // Probe the primary table.
  ProbeTable(isolate,
             masm,
             flags,
             kPrimary,
             receiver,
             name,
             scratch,
             extra,
             extra2,
             extra3);

  // Primary miss: Compute hash for secondary probe.
  __ srl(at, name, kHeapObjectTagSize);
  __ Subu(scratch, scratch, at);
  uint32_t mask2 = kSecondaryTableSize - 1;
  __ Addu(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
  __ And(scratch, scratch, Operand(mask2));

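  // The secondary index is derived from the primary one:
  //   ((primary - (name >> kHeapObjectTagSize))
  //    + (flags >> kHeapObjectTagSize)) & (kSecondaryTableSize - 1),
  // so entries that collide in the primary table typically land in
  // different secondary slots.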
  // Probe the secondary table.
  ProbeTable(isolate,
             masm,
             flags,
             kSecondary,
             receiver,
             name,
             scratch,
             extra,
             extra2,
             extra3);

  // Cache miss: Fall-through and let caller handle the miss by
  // entering the runtime system.
  __ bind(&miss);
  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
                      extra2, extra3);
}


void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
                                                       int index,
                                                       Register prototype) {
  // Load the global or builtins object from the current context.
  __ lw(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  // Load the global context from the global or builtins object.
  __ lw(prototype,
        FieldMemOperand(prototype, GlobalObject::kGlobalContextOffset));
  // Load the function from the global context.
  __ lw(prototype, MemOperand(prototype, Context::SlotOffset(index)));
  // Load the initial map.  The global functions all have initial maps.
  __ lw(prototype,
        FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
  // Load the prototype from the initial map.
  __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
}


void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
    MacroAssembler* masm,
    int index,
    Register prototype,
    Label* miss) {
  Isolate* isolate = masm->isolate();
  // Check we're still in the same context.
  __ lw(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  ASSERT(!prototype.is(at));
  __ li(at, isolate->global());
  __ Branch(miss, ne, prototype, Operand(at));
  // Get the global function with the given index.
  Handle<JSFunction> function(
      JSFunction::cast(isolate->global_context()->get(index)));
  // Load its initial map. The global functions all have initial maps.
  __ li(prototype, Handle<Map>(function->initial_map()));
  // Load the prototype from the initial map.
  __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
}


// Load a fast property out of a holder object (src). In-object properties
// are loaded directly; otherwise the property is loaded from the properties
// fixed array.
void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
                                            Register dst,
                                            Register src,
                                            Handle<JSObject> holder,
                                            int index) {
  // Adjust for the number of properties stored in the holder.
  index -= holder->map()->inobject_properties();
  if (index < 0) {
    // Get the property straight out of the holder.
    int offset = holder->map()->instance_size() + (index * kPointerSize);
    __ lw(dst, FieldMemOperand(src, offset));
  } else {
    // Calculate the offset into the properties array.
    int offset = index * kPointerSize + FixedArray::kHeaderSize;
    __ lw(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
    __ lw(dst, FieldMemOperand(dst, offset));
  }
}


void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
                                           Register receiver,
                                           Register scratch,
                                           Label* miss_label) {
  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss_label);

  // Check that the object is a JS array.
  __ GetObjectType(receiver, scratch, scratch);
  __ Branch(miss_label, ne, scratch, Operand(JS_ARRAY_TYPE));

  // Load length directly from the JS array.
  __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Ret();
}


// Generate code to check if an object is a string.  If the object is a
// heap object, its map's instance type is left in the scratch1 register.
// If this is not needed, scratch1 and scratch2 may be the same register.
static void GenerateStringCheck(MacroAssembler* masm,
                                Register receiver,
                                Register scratch1,
                                Register scratch2,
                                Label* smi,
                                Label* non_string_object) {
  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, smi, t0);

  // Check that the object is a string.
  __ lw(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  __ And(scratch2, scratch1, Operand(kIsNotStringMask));
  // The cast is to resolve the overload for the argument of 0x0.
  __ Branch(non_string_object,
            ne,
            scratch2,
            Operand(static_cast<int32_t>(kStringTag)));
}


// Generate code to load the length from a string object and return the length.
// If the receiver object is not a string or a wrapped string object, execution
// continues at the miss label. The register containing the receiver is
// potentially clobbered.
void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
                                            Register receiver,
                                            Register scratch1,
                                            Register scratch2,
                                            Label* miss,
                                            bool support_wrappers) {
  Label check_wrapper;

  // Check if the object is a string, leaving the instance type in the
  // scratch1 register.
  GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
                      support_wrappers ? &check_wrapper : miss);

  // Load length directly from the string.
  __ lw(v0, FieldMemOperand(receiver, String::kLengthOffset));
  __ Ret();

  if (support_wrappers) {
    // Check if the object is a JSValue wrapper.
    __ bind(&check_wrapper);
    __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));

    // Unwrap the value and check if the wrapped value is a string.
    __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
    GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
    __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
    __ Ret();
  }
}


void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
                                                 Register receiver,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* miss_label) {
  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
  __ mov(v0, scratch1);
  __ Ret();
}


// Generate StoreField code; the value is passed in the a0 register.
// After executing the generated code, the receiver_reg and name_reg
// registers may be clobbered.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
                                      Handle<JSObject> object,
                                      int index,
                                      Handle<Map> transition,
                                      Register receiver_reg,
                                      Register name_reg,
                                      Register scratch,
                                      Label* miss_label) {
  // a0 : value.
  Label exit;
  // Check that the map of the object hasn't changed.
  CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
                                             : REQUIRE_EXACT_MAP;
  __ CheckMap(receiver_reg, scratch, Handle<Map>(object->map()), miss_label,
              DO_SMI_CHECK, mode);

  // Perform global security token check if needed.
  if (object->IsJSGlobalProxy()) {
    __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
  }

  // Stub never generated for non-global objects that require access
  // checks.
  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());

  // Perform map transition for the receiver if necessary.
  if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
    // The properties must be extended before we can store the value.
    // We jump to a runtime call that extends the properties array.
    __ push(receiver_reg);
    __ li(a2, Operand(transition));
    __ Push(a2, a0);
    __ TailCallExternalReference(
           ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
                             masm->isolate()),
           3, 1);
    return;
  }

  if (!transition.is_null()) {
    // Update the map of the object; no write barrier updating is
    // needed because the map is never in new space.
    __ li(t0, Operand(transition));
    __ sw(t0, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
  }

  // Adjust for the number of properties stored in the object. Even in the
  // face of a transition we can use the old map here because the size of the
  // object and the number of in-object properties is not going to change.
  index -= object->map()->inobject_properties();

  if (index < 0) {
    // Set the property straight into the object.
    int offset = object->map()->instance_size() + (index * kPointerSize);
    __ sw(a0, FieldMemOperand(receiver_reg, offset));

    // Skip updating write barrier if storing a smi.
    __ JumpIfSmi(a0, &exit, scratch);

    // Update the write barrier for the array address.
    // Pass the now unused name_reg as a scratch register.
    __ mov(name_reg, a0);
    __ RecordWriteField(receiver_reg,
                        offset,
                        name_reg,
                        scratch,
                        kRAHasNotBeenSaved,
                        kDontSaveFPRegs);
  } else {
    // Write to the properties array.
    int offset = index * kPointerSize + FixedArray::kHeaderSize;
    // Get the properties array.
    __ lw(scratch, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
    __ sw(a0, FieldMemOperand(scratch, offset));

    // Skip updating write barrier if storing a smi.
    __ JumpIfSmi(a0, &exit);

    // Update the write barrier for the array address.
    // Ok to clobber receiver_reg and name_reg, since we return.
    __ mov(name_reg, a0);
    __ RecordWriteField(scratch,
                        offset,
                        name_reg,
                        receiver_reg,
                        kRAHasNotBeenSaved,
                        kDontSaveFPRegs);
  }

  // Return the value (register v0).
  __ bind(&exit);
  __ mov(v0, a0);
  __ Ret();
}


void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
  ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
  Handle<Code> code = (kind == Code::LOAD_IC)
      ? masm->isolate()->builtins()->LoadIC_Miss()
      : masm->isolate()->builtins()->KeyedLoadIC_Miss();
  __ Jump(code, RelocInfo::CODE_TARGET);
}


static void GenerateCallFunction(MacroAssembler* masm,
                                 Handle<Object> object,
                                 const ParameterCount& arguments,
                                 Label* miss,
                                 Code::ExtraICState extra_ic_state) {
  // ----------- S t a t e -------------
  //  -- a0: receiver
  //  -- a1: function to call
  // -----------------------------------
  // Check that the function really is a function.
  __ JumpIfSmi(a1, miss);
  __ GetObjectType(a1, a3, a3);
  __ Branch(miss, ne, a3, Operand(JS_FUNCTION_TYPE));

  // Patch the receiver on the stack with the global proxy if
  // necessary.
  if (object->IsGlobalObject()) {
    __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
    __ sw(a3, MemOperand(sp, arguments.immediate() * kPointerSize));
  }

  // Invoke the function.
  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
      ? CALL_AS_FUNCTION
      : CALL_AS_METHOD;
  __ InvokeFunction(a1, arguments, JUMP_FUNCTION, NullCallWrapper(), call_kind);
}


static void PushInterceptorArguments(MacroAssembler* masm,
                                     Register receiver,
                                     Register holder,
                                     Register name,
                                     Handle<JSObject> holder_obj) {
  __ push(name);
  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
  Register scratch = name;
  __ li(scratch, Operand(interceptor));
  __ Push(scratch, receiver, holder);
  __ lw(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
  __ push(scratch);
}


static void CompileCallLoadPropertyWithInterceptor(
    MacroAssembler* masm,
    Register receiver,
    Register holder,
    Register name,
    Handle<JSObject> holder_obj) {
  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);

  ExternalReference ref =
      ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
          masm->isolate());
  __ PrepareCEntryArgs(5);
  __ PrepareCEntryFunction(ref);

  CEntryStub stub(1);
  __ CallStub(&stub);
}


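// The three reserved slots hold the holder, the callee JS function, and the
// call data (see the state comment in GenerateFastApiDirectCall below).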
static const int kFastApiCallArguments = 3;


// Reserves space for the extra arguments to FastHandleApiCall in the
// caller's frame.
//
// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
                                       Register scratch) {
  ASSERT(Smi::FromInt(0) == 0);
  for (int i = 0; i < kFastApiCallArguments; i++) {
    __ push(zero_reg);
  }
}


// Undoes the effects of ReserveSpaceForFastApiCall.
static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
  __ Drop(kFastApiCallArguments);
}


static void GenerateFastApiDirectCall(MacroAssembler* masm,
                                      const CallOptimization& optimization,
                                      int argc) {
  // ----------- S t a t e -------------
  //  -- sp[0]              : holder (set by CheckPrototypes)
  //  -- sp[4]              : callee JS function
  //  -- sp[8]              : call data
  //  -- sp[12]             : last JS argument
  //  -- ...
  //  -- sp[(argc + 3) * 4] : first JS argument
  //  -- sp[(argc + 4) * 4] : receiver
  // -----------------------------------
  // Get the function and set up the context.
  Handle<JSFunction> function = optimization.constant_function();
  __ LoadHeapObject(t1, function);
  __ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));

  // Pass the additional arguments FastHandleApiCall expects.
  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
  Handle<Object> call_data(api_call_info->data());
  if (masm->isolate()->heap()->InNewSpace(*call_data)) {
    __ li(a0, api_call_info);
    __ lw(t2, FieldMemOperand(a0, CallHandlerInfo::kDataOffset));
  } else {
    __ li(t2, call_data);
  }

  // Store the JS function and call data.
  __ sw(t1, MemOperand(sp, 1 * kPointerSize));
  __ sw(t2, MemOperand(sp, 2 * kPointerSize));

  // a2 points to call data as expected by Arguments
  // (refer to layout above).
  __ Addu(a2, sp, Operand(2 * kPointerSize));

  const int kApiStackSpace = 4;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  // NOTE: the O32 ABI requires a0 to hold a special pointer when returning a
  // struct from the function (which is currently the case). This means we pass
  // the first argument in a1 instead of a0. TryCallApiFunctionAndReturn
  // will handle setting up a0.

  // a1 = v8::Arguments&
  // Arguments is built at sp + 1 (sp is a reserved spot for ra).
  __ Addu(a1, sp, kPointerSize);

  // v8::Arguments::implicit_args = data
  __ sw(a2, MemOperand(a1, 0 * kPointerSize));
  // v8::Arguments::values = last argument
  __ Addu(t0, a2, Operand(argc * kPointerSize));
  __ sw(t0, MemOperand(a1, 1 * kPointerSize));
  // v8::Arguments::length_ = argc
  __ li(t0, Operand(argc));
  __ sw(t0, MemOperand(a1, 2 * kPointerSize));
  // v8::Arguments::is_construct_call = 0
  __ sw(zero_reg, MemOperand(a1, 3 * kPointerSize));

  const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
  Address function_address = v8::ToCData<Address>(api_call_info->callback());
  ApiFunction fun(function_address);
  ExternalReference ref =
      ExternalReference(&fun,
                        ExternalReference::DIRECT_API_CALL,
                        masm->isolate());
  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
}

class CallInterceptorCompiler BASE_EMBEDDED {
 public:
  CallInterceptorCompiler(StubCompiler* stub_compiler,
                          const ParameterCount& arguments,
                          Register name,
                          Code::ExtraICState extra_ic_state)
      : stub_compiler_(stub_compiler),
        arguments_(arguments),
        name_(name),
        extra_ic_state_(extra_ic_state) {}

  void Compile(MacroAssembler* masm,
               Handle<JSObject> object,
               Handle<JSObject> holder,
               Handle<String> name,
               LookupResult* lookup,
               Register receiver,
               Register scratch1,
               Register scratch2,
               Register scratch3,
               Label* miss) {
    ASSERT(holder->HasNamedInterceptor());
    ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());

    // Check that the receiver isn't a smi.
    __ JumpIfSmi(receiver, miss);
    CallOptimization optimization(lookup);
    if (optimization.is_constant_call()) {
      CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
                       holder, lookup, name, optimization, miss);
    } else {
      CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
                     name, holder, miss);
    }
  }

 private:
  void CompileCacheable(MacroAssembler* masm,
                        Handle<JSObject> object,
                        Register receiver,
                        Register scratch1,
                        Register scratch2,
                        Register scratch3,
                        Handle<JSObject> interceptor_holder,
                        LookupResult* lookup,
                        Handle<String> name,
                        const CallOptimization& optimization,
                        Label* miss_label) {
    ASSERT(optimization.is_constant_call());
    ASSERT(!lookup->holder()->IsGlobalObject());
    Counters* counters = masm->isolate()->counters();
    int depth1 = kInvalidProtoDepth;
    int depth2 = kInvalidProtoDepth;
    bool can_do_fast_api_call = false;
    if (optimization.is_simple_api_call() &&
        !lookup->holder()->IsGlobalObject()) {
      depth1 = optimization.GetPrototypeDepthOfExpectedType(
          object, interceptor_holder);
      if (depth1 == kInvalidProtoDepth) {
        depth2 = optimization.GetPrototypeDepthOfExpectedType(
            interceptor_holder, Handle<JSObject>(lookup->holder()));
      }
      can_do_fast_api_call =
          depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
    }

    __ IncrementCounter(counters->call_const_interceptor(), 1,
                        scratch1, scratch2);

    if (can_do_fast_api_call) {
      __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
                          scratch1, scratch2);
      ReserveSpaceForFastApiCall(masm, scratch1);
    }

    // Check that the maps from the receiver to the interceptor's holder
    // haven't changed, and thus we can invoke the interceptor.
    Label miss_cleanup;
    Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
    Register holder =
        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
                                        scratch1, scratch2, scratch3,
                                        name, depth1, miss);

    // Invoke the interceptor; if it provides a value, branch to
    // |regular_invoke|.
    Label regular_invoke;
    LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
                        &regular_invoke);

    // The interceptor returned nothing for this property.  Try to use the
    // cached constant function.

    // Check that the maps from the interceptor's holder to the constant
    // function's holder haven't changed, and thus we can use the cached
    // constant function.
    if (*interceptor_holder != lookup->holder()) {
      stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
                                      Handle<JSObject>(lookup->holder()),
                                      scratch1, scratch2, scratch3,
                                      name, depth2, miss);
    } else {
      // CheckPrototypes has a side effect of fetching a 'holder'
      // for API (object which is instanceof for the signature).  It's
      // safe to omit it here, as if present, it should be fetched
      // by the previous CheckPrototypes.
      ASSERT(depth2 == kInvalidProtoDepth);
    }

    // Invoke the function.
    if (can_do_fast_api_call) {
      GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
    } else {
      CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
          ? CALL_AS_FUNCTION
          : CALL_AS_METHOD;
      __ InvokeFunction(optimization.constant_function(), arguments_,
                        JUMP_FUNCTION, NullCallWrapper(), call_kind);
    }

    // Deferred code for the fast API call case: clean up the preallocated
    // space.
    if (can_do_fast_api_call) {
      __ bind(&miss_cleanup);
      FreeSpaceForFastApiCall(masm);
      __ Branch(miss_label);
    }

    // Invoke a regular function.
    __ bind(&regular_invoke);
    if (can_do_fast_api_call) {
      FreeSpaceForFastApiCall(masm);
    }
  }

  void CompileRegular(MacroAssembler* masm,
                      Handle<JSObject> object,
                      Register receiver,
                      Register scratch1,
                      Register scratch2,
                      Register scratch3,
                      Handle<String> name,
                      Handle<JSObject> interceptor_holder,
                      Label* miss_label) {
    Register holder =
        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
                                        scratch1, scratch2, scratch3,
                                        name, miss_label);

    // Call a runtime function to load the interceptor property.
    FrameScope scope(masm, StackFrame::INTERNAL);
    // Save the name_ register across the call.
    __ push(name_);

    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);

    __ CallExternalReference(
          ExternalReference(
              IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
              masm->isolate()),
          5);
    // Restore the name_ register.
    __ pop(name_);
    // Leave the internal frame.
  }

  void LoadWithInterceptor(MacroAssembler* masm,
                           Register receiver,
                           Register holder,
                           Handle<JSObject> holder_obj,
                           Register scratch,
                           Label* interceptor_succeeded) {
    {
      FrameScope scope(masm, StackFrame::INTERNAL);

      __ Push(holder, name_);
      CompileCallLoadPropertyWithInterceptor(masm,
                                             receiver,
                                             holder,
                                             name_,
                                             holder_obj);
      __ pop(name_);  // Restore the name.
      __ pop(receiver);  // Restore the holder.
    }
    // If the interceptor returns the no-result sentinel, call the constant
    // function.
    __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
    __ Branch(interceptor_succeeded, ne, v0, Operand(scratch));
  }

  StubCompiler* stub_compiler_;
  const ParameterCount& arguments_;
  Register name_;
  Code::ExtraICState extra_ic_state_;
};


// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
static void GenerateCheckPropertyCell(MacroAssembler* masm,
                                      Handle<GlobalObject> global,
                                      Handle<String> name,
                                      Register scratch,
                                      Label* miss) {
  Handle<JSGlobalPropertyCell> cell =
      GlobalObject::EnsurePropertyCell(global, name);
  ASSERT(cell->value()->IsTheHole());
  __ li(scratch, Operand(cell));
  __ lw(scratch,
        FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  __ Branch(miss, ne, scratch, Operand(at));
}


// Calls GenerateCheckPropertyCell for each global object in the prototype
// chain from object to (but not including) holder.
static void GenerateCheckPropertyCells(MacroAssembler* masm,
                                       Handle<JSObject> object,
                                       Handle<JSObject> holder,
                                       Handle<String> name,
                                       Register scratch,
                                       Label* miss) {
  Handle<JSObject> current = object;
  while (!current.is_identical_to(holder)) {
    if (current->IsGlobalObject()) {
      GenerateCheckPropertyCell(masm,
                                Handle<GlobalObject>::cast(current),
                                name,
                                scratch,
                                miss);
    }
    current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
  }
}


// Convert and store the int passed in register ival to an IEEE 754 single
// precision floating point value at memory location (dst + 4 * wordoffset).
// If FPU is available, use it for the conversion.
static void StoreIntAsFloat(MacroAssembler* masm,
                            Register dst,
                            Register wordoffset,
                            Register ival,
                            Register fval,
                            Register scratch1,
                            Register scratch2) {
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ mtc1(ival, f0);
    __ cvt_s_w(f0, f0);
    __ sll(scratch1, wordoffset, 2);
    __ addu(scratch1, dst, scratch1);
    __ swc1(f0, MemOperand(scratch1, 0));
  } else {
    // FPU is not available; do the conversion manually.

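    // Manual path: assemble the IEEE 754 binary32 fields directly in fval,
    // i.e. sign (1 bit) | biased exponent (8 bits, bias 127) | mantissa
    // (23 bits, with an implicit leading 1).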
    Label not_special, done;
    // Move the sign bit from the source to the destination.  This works
    // because the sign bit of the float has the same position and polarity
    // as the 2's complement sign bit of a 32-bit int.
    ASSERT(kBinary32SignMask == 0x80000000u);

    __ And(fval, ival, Operand(kBinary32SignMask));
    // Negate the value if it is negative.
    __ subu(scratch1, zero_reg, ival);
    __ Movn(ival, scratch1, fval);

    // We have -1, 0 or 1, which we treat specially. Register ival contains
    // the absolute value: it is either equal to 1 (special case of -1 and 1),
    // greater than 1 (not a special case) or less than 1 (special case of 0).
    __ Branch(&not_special, gt, ival, Operand(1));

    // For 1 or -1 we need to or in the 0 exponent (biased).
    static const uint32_t exponent_word_for_1 =
        kBinary32ExponentBias << kBinary32ExponentShift;

    __ Xor(scratch1, ival, Operand(1));
    __ li(scratch2, exponent_word_for_1);
    __ or_(scratch2, fval, scratch2);
    __ Movz(fval, scratch2, scratch1);  // Only if ival is equal to 1.
    __ Branch(&done);

    __ bind(&not_special);
    // Count leading zeros.
    // Gets the wrong answer for 0, but we already checked for that case above.
    Register zeros = scratch2;
    __ Clz(zeros, ival);

    // Compute the exponent and or it into the exponent register.
    __ li(scratch1, (kBitsPerInt - 1) + kBinary32ExponentBias);
    __ subu(scratch1, scratch1, zeros);

    __ sll(scratch1, scratch1, kBinary32ExponentShift);
    __ or_(fval, fval, scratch1);

    // Shift up the source, chopping the top bit off (it becomes the
    // implicit mantissa bit).
    __ Addu(zeros, zeros, Operand(1));
    // This wouldn't work for 1 and -1 as the shift would be 32, which means 0.
    __ sllv(ival, ival, zeros);
    // Or in the mantissa (the top kBinary32MantissaBits bits of ival).
    __ srl(scratch1, ival, kBitsPerInt - kBinary32MantissaBits);
    __ or_(fval, fval, scratch1);

    __ bind(&done);

    __ sll(scratch1, wordoffset, 2);
    __ addu(scratch1, dst, scratch1);
    __ sw(fval, MemOperand(scratch1, 0));
  }
}


// Convert an unsigned integer with the specified number of leading zeroes in
// its binary representation to an IEEE 754 double.
// The integer to convert is passed in register hiword.
// The resulting double is returned in registers hiword:loword.
// This function does not work correctly for 0.
static void GenerateUInt2Double(MacroAssembler* masm,
                                Register hiword,
                                Register loword,
                                Register scratch,
                                int leading_zeroes) {
  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
  const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;

  const int mantissa_shift_for_hi_word =
      meaningful_bits - HeapNumber::kMantissaBitsInTopWord;

  const int mantissa_shift_for_lo_word =
      kBitsPerInt - mantissa_shift_for_hi_word;

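  // IEEE 754 double layout assumed here: sign (1 bit) | biased exponent
  // (11 bits, bias 1023) | mantissa (52 bits, of which the top 20 live in
  // hiword). The shifts below drop the integer's leading one onto the
  // exponent's least significant bit; the fixup at the end clears that bit
  // again when the biased exponent is even.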
  __ li(scratch, biased_exponent << HeapNumber::kExponentShift);
  if (mantissa_shift_for_hi_word > 0) {
    __ sll(loword, hiword, mantissa_shift_for_lo_word);
    __ srl(hiword, hiword, mantissa_shift_for_hi_word);
    __ or_(hiword, scratch, hiword);
  } else {
    __ mov(loword, zero_reg);
    __ sll(hiword, hiword, mantissa_shift_for_hi_word);
    __ or_(hiword, scratch, hiword);
  }

  // If the least significant bit of the biased exponent was not 1, it was
  // corrupted by the most significant bit of the mantissa, so we should fix
  // that.
  if (!(biased_exponent & 1)) {
    __ li(scratch, 1 << HeapNumber::kExponentShift);
    __ nor(scratch, scratch, scratch);
    __ and_(hiword, hiword, scratch);
  }
}


#undef __
#define __ ACCESS_MASM(masm())


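// Walks the prototype chain from 'object' up to 'holder', emitting map checks
// for each object along the way; on any mismatch control jumps to 'miss'.
// Returns the register that ends up holding the holder object. If
// save_at_depth matches the current depth, the object at that depth is also
// stored to sp[0].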
Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
                                       Register object_reg,
                                       Handle<JSObject> holder,
                                       Register holder_reg,
                                       Register scratch1,
                                       Register scratch2,
                                       Handle<String> name,
                                       int save_at_depth,
                                       Label* miss) {
  // Make sure there's no overlap between holder and object registers.
  ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
  ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
         && !scratch2.is(scratch1));

  // Keep track of the current object in register reg.
  Register reg = object_reg;
  int depth = 0;

  if (save_at_depth == depth) {
    __ sw(reg, MemOperand(sp));
  }

  // Check the maps in the prototype chain.
  // Traverse the prototype chain from the object and do map checks.
  Handle<JSObject> current = object;
  while (!current.is_identical_to(holder)) {
    ++depth;

    // Only global objects and objects that do not require access
    // checks are allowed in stubs.
    ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());

    Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
    if (!current->HasFastProperties() &&
        !current->IsJSGlobalObject() &&
        !current->IsJSGlobalProxy()) {
      if (!name->IsSymbol()) {
        name = factory()->LookupSymbol(name);
      }
      ASSERT(current->property_dictionary()->FindEntry(*name) ==
             StringDictionary::kNotFound);

      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
                                       scratch1, scratch2);

      __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
      reg = holder_reg;  // From now on the object will be in holder_reg.
      __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
    } else {
      Handle<Map> current_map(current->map());
      __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK,
                  ALLOW_ELEMENT_TRANSITION_MAPS);
      // Check access rights to the global object.  This has to happen after
      // the map check so that we know that the object is actually a global
      // object.
      if (current->IsJSGlobalProxy()) {
        __ CheckAccessGlobalProxy(reg, scratch2, miss);
      }
      reg = holder_reg;  // From now on the object will be in holder_reg.

      if (heap()->InNewSpace(*prototype)) {
        // The prototype is in new space; we cannot store a reference to it
        // in the code.  Load it from the map.
        __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
      } else {
        // The prototype is in old space; load it directly.
        __ li(reg, Operand(prototype));
      }
    }

    if (save_at_depth == depth) {
      __ sw(reg, MemOperand(sp));
    }

    // Go to the next object in the prototype chain.
    current = prototype;
  }

  // Log the check depth.
  LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));

  // Check the holder map.
  __ CheckMap(reg, scratch1, Handle<Map>(current->map()), miss,
              DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);

  // Perform security check for access to the global object.
  ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
  if (holder->IsJSGlobalProxy()) {
    __ CheckAccessGlobalProxy(reg, scratch1, miss);
  }

  // If we've skipped any global objects, it's not enough to verify that
  // their maps haven't changed.  We also need to check that the property
  // cell for the property is still empty.
  GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);

  // Return the register containing the holder.
  return reg;
}


void StubCompiler::GenerateLoadField(Handle<JSObject> object,
                                     Handle<JSObject> holder,
                                     Register receiver,
                                     Register scratch1,
                                     Register scratch2,
                                     Register scratch3,
                                     int index,
                                     Handle<String> name,
                                     Label* miss) {
  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss);

  // Check that the maps haven't changed.
  Register reg = CheckPrototypes(
      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
  GenerateFastPropertyLoad(masm(), v0, reg, holder, index);
  __ Ret();
}


void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
                                        Handle<JSObject> holder,
                                        Register receiver,
                                        Register scratch1,
                                        Register scratch2,
                                        Register scratch3,
                                        Handle<JSFunction> value,
                                        Handle<String> name,
                                        Label* miss) {
  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss, scratch1);

  // Check that the maps haven't changed.
  CheckPrototypes(object, receiver, holder,
                  scratch1, scratch2, scratch3, name, miss);

  // Return the constant value.
  __ LoadHeapObject(v0, value);
  __ Ret();
}


void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
                                        Handle<JSObject> holder,
                                        Register receiver,
                                        Register name_reg,
                                        Register scratch1,
                                        Register scratch2,
                                        Register scratch3,
                                        Handle<AccessorInfo> callback,
                                        Handle<String> name,
                                        Label* miss) {
  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss, scratch1);

  // Check that the maps haven't changed.
  Register reg = CheckPrototypes(object, receiver, holder, scratch1,
                                 scratch2, scratch3, name, miss);

  // Build the AccessorInfo::args_ list on the stack and push the property
  // name below the exit frame, so the GC is aware of them and can find the
  // pointers.
  __ push(receiver);
  __ mov(scratch2, sp);  // scratch2 = AccessorInfo::args_
  if (heap()->InNewSpace(callback->data())) {
    __ li(scratch3, callback);
    __ lw(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
  } else {
    __ li(scratch3, Handle<Object>(callback->data()));
  }
  __ Push(reg, scratch3, name_reg);
  __ mov(a2, scratch2);  // Saved in case scratch2 == a1.
  __ mov(a1, sp);  // a1 (first argument - see note below) = Handle<String>

  // NOTE: the O32 ABI requires a0 to hold a special pointer when returning a
  // struct from the function (which is currently the case). This means we pass
  // the arguments in a1-a2 instead of a0-a1. TryCallApiFunctionAndReturn
  // will handle setting up a0.

  const int kApiStackSpace = 1;
  FrameScope frame_scope(masm(), StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  // Create an AccessorInfo instance on the stack above the exit frame with
  // scratch2 (internal::Object** args_) as the data.
  __ sw(a2, MemOperand(sp, kPointerSize));
  // a2 (second argument - see note above) = AccessorInfo&
  __ Addu(a2, sp, kPointerSize);

  const int kStackUnwindSpace = 4;
  Address getter_address = v8::ToCData<Address>(callback->getter());
  ApiFunction fun(getter_address);
  ExternalReference ref =
      ExternalReference(&fun,
                        ExternalReference::DIRECT_GETTER_CALL,
                        masm()->isolate());
  __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
}
1237 
1238 
1239 void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
1240                                            Handle<JSObject> interceptor_holder,
1241                                            LookupResult* lookup,
1242                                            Register receiver,
1243                                            Register name_reg,
1244                                            Register scratch1,
1245                                            Register scratch2,
1246                                            Register scratch3,
1247                                            Handle<String> name,
1248                                            Label* miss) {
1249   ASSERT(interceptor_holder->HasNamedInterceptor());
1250   ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
1251 
1252   // Check that the receiver isn't a smi.
1253   __ JumpIfSmi(receiver, miss);
1254 
1255   // So far the most common follow-ups for interceptor loads are FIELD
1256   // and CALLBACKS, so only those are inlined here; other cases may be
1257   // added later.
1258   bool compile_followup_inline = false;
1259   if (lookup->IsFound() && lookup->IsCacheable()) {
1260     if (lookup->type() == FIELD) {
1261       compile_followup_inline = true;
1262     } else if (lookup->type() == CALLBACKS &&
1263         lookup->GetCallbackObject()->IsAccessorInfo()) {
1264       compile_followup_inline =
1265           AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
1266     }
1267   }
1268 
1269   if (compile_followup_inline) {
1270     // Compile the interceptor call, followed by inline code to load the
1271     // property from further up the prototype chain if the call fails.
1272     // Check that the maps haven't changed.
1273     Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
1274                                           scratch1, scratch2, scratch3,
1275                                           name, miss);
1276     ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
1277 
1278     // Save necessary data before invoking an interceptor.
1279     // Requires a frame to make GC aware of pushed pointers.
1280     {
1281       FrameScope frame_scope(masm(), StackFrame::INTERNAL);
1282       if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
1283         // CALLBACKS case needs a receiver to be passed into C++ callback.
1284         __ Push(receiver, holder_reg, name_reg);
1285       } else {
1286         __ Push(holder_reg, name_reg);
1287       }
1288       // Invoke the interceptor.  Note: the map checks from the receiver to
1289       // the interceptor's holder have been compiled before (see the callers
1290       // of this method).
1291       CompileCallLoadPropertyWithInterceptor(masm(),
1292                                              receiver,
1293                                              holder_reg,
1294                                              name_reg,
1295                                              interceptor_holder);
1296       // Check if the interceptor provided a value for the property.  If so,
1297       // return immediately.
1298       Label interceptor_failed;
1299       __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
1300       __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
1301       frame_scope.GenerateLeaveFrame();
1302       __ Ret();
1303 
1304       __ bind(&interceptor_failed);
1305       __ pop(name_reg);
1306       __ pop(holder_reg);
1307       if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
1308         __ pop(receiver);
1309       }
1310       // Leave the internal frame.
1311     }
1312     // Check that the maps from the interceptor's holder to the lookup's
1313     // holder haven't changed, and load the lookup's holder into holder_reg.
1314     if (*interceptor_holder != lookup->holder()) {
1315       holder_reg = CheckPrototypes(interceptor_holder,
1316                                    holder_reg,
1317                                    Handle<JSObject>(lookup->holder()),
1318                                    scratch1,
1319                                    scratch2,
1320                                    scratch3,
1321                                    name,
1322                                    miss);
1323     }
1324 
1325     if (lookup->type() == FIELD) {
1326       // We found FIELD property in prototype chain of interceptor's holder.
1327       // Retrieve a field from field's holder.
1328       GenerateFastPropertyLoad(masm(), v0, holder_reg,
1329                                Handle<JSObject>(lookup->holder()),
1330                                lookup->GetFieldIndex());
1331       __ Ret();
1332     } else {
1333       // We found CALLBACKS property in prototype chain of interceptor's
1334       // holder.
1335       ASSERT(lookup->type() == CALLBACKS);
1336       Handle<AccessorInfo> callback(
1337           AccessorInfo::cast(lookup->GetCallbackObject()));
1338       ASSERT(callback->getter() != NULL);
1339 
1340       // Tail call to runtime.
1341       // Important invariant in CALLBACKS case: the code above must be
1342       // structured to never clobber |receiver| register.
1343       __ li(scratch2, callback);
1344       // holder_reg is either receiver or scratch1.
1345       if (!receiver.is(holder_reg)) {
1346         ASSERT(scratch1.is(holder_reg));
1347         __ Push(receiver, holder_reg);
1348         __ lw(scratch3,
1349               FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
1350         __ Push(scratch3, scratch2, name_reg);
1351       } else {
1352         __ push(receiver);
1353         __ lw(scratch3,
1354               FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
1355         __ Push(holder_reg, scratch3, scratch2, name_reg);
1356       }
1357 
1358       ExternalReference ref =
1359           ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
1360                             masm()->isolate());
1361       __ TailCallExternalReference(ref, 5, 1);
1362     }
1363   } else {  // !compile_followup_inline
1364     // Call the runtime system to load the interceptor.
1365     // Check that the maps haven't changed.
1366     Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
1367                                           scratch1, scratch2, scratch3,
1368                                           name, miss);
1369     PushInterceptorArguments(masm(), receiver, holder_reg,
1370                              name_reg, interceptor_holder);
1371 
1372     ExternalReference ref = ExternalReference(
1373         IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), masm()->isolate());
1374     __ TailCallExternalReference(ref, 5, 1);
1375   }
1376 }
1377 
1378 
1379 void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
1380   if (kind_ == Code::KEYED_CALL_IC) {
1381     __ Branch(miss, ne, a2, Operand(name));
1382   }
1383 }
1384 
1385 
1386 void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
1387                                                    Handle<JSObject> holder,
1388                                                    Handle<String> name,
1389                                                    Label* miss) {
1390   ASSERT(holder->IsGlobalObject());
1391 
1392   // Get the number of arguments.
1393   const int argc = arguments().immediate();
1394 
1395   // Get the receiver from the stack.
1396   __ lw(a0, MemOperand(sp, argc * kPointerSize));
1397 
1398   // Check that the maps haven't changed.
1399   __ JumpIfSmi(a0, miss);
1400   CheckPrototypes(object, a0, holder, a3, a1, t0, name, miss);
1401 }
1402 
1403 
1404 void CallStubCompiler::GenerateLoadFunctionFromCell(
1405     Handle<JSGlobalPropertyCell> cell,
1406     Handle<JSFunction> function,
1407     Label* miss) {
1408   // Get the value from the cell.
1409   __ li(a3, Operand(cell));
1410   __ lw(a1, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
1411 
1412   // Check that the cell contains the same function.
1413   if (heap()->InNewSpace(*function)) {
1414     // We can't embed a pointer to a function in new space so we have
1415     // to verify that the shared function info is unchanged. This has
1416     // the nice side effect that multiple closures based on the same
1417     // function can all use this call IC. Before we load through the
1418     // function, we have to verify that it still is a function.
1419     __ JumpIfSmi(a1, miss);
1420     __ GetObjectType(a1, a3, a3);
1421     __ Branch(miss, ne, a3, Operand(JS_FUNCTION_TYPE));
1422 
1423     // Check the shared function info. Make sure it hasn't changed.
1424     __ li(a3, Handle<SharedFunctionInfo>(function->shared()));
1425     __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
1426     __ Branch(miss, ne, t0, Operand(a3));
1427   } else {
1428     __ Branch(miss, ne, a1, Operand(function));
1429   }
1430 }
1431 
1432 
1433 void CallStubCompiler::GenerateMissBranch() {
1434   Handle<Code> code =
1435       isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
1436                                                kind_,
1437                                                extra_state_);
1438   __ Jump(code, RelocInfo::CODE_TARGET);
1439 }
1440 
1441 
1442 Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
1443                                                 Handle<JSObject> holder,
1444                                                 int index,
1445                                                 Handle<String> name) {
1446   // ----------- S t a t e -------------
1447   //  -- a2    : name
1448   //  -- ra    : return address
1449   // -----------------------------------
1450   Label miss;
1451 
1452   GenerateNameCheck(name, &miss);
1453 
1454   const int argc = arguments().immediate();
1455 
1456   // Get the receiver of the function from the stack into a0.
1457   __ lw(a0, MemOperand(sp, argc * kPointerSize));
1458   // Check that the receiver isn't a smi.
1459   __ JumpIfSmi(a0, &miss, t0);
1460 
1461   // Do the right check and compute the holder register.
1462   Register reg = CheckPrototypes(object, a0, holder, a1, a3, t0, name, &miss);
1463   GenerateFastPropertyLoad(masm(), a1, reg, holder, index);
1464 
1465   GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
1466 
1467   // Handle call cache miss.
1468   __ bind(&miss);
1469   GenerateMissBranch();
1470 
1471   // Return the generated code.
1472   return GetCode(FIELD, name);
1473 }
1474 
1475 
1476 Handle<Code> CallStubCompiler::CompileArrayPushCall(
1477     Handle<Object> object,
1478     Handle<JSObject> holder,
1479     Handle<JSGlobalPropertyCell> cell,
1480     Handle<JSFunction> function,
1481     Handle<String> name) {
1482   // ----------- S t a t e -------------
1483   //  -- a2    : name
1484   //  -- ra    : return address
1485   //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
1486   //  -- ...
1487   //  -- sp[argc * 4]           : receiver
1488   // -----------------------------------
1489 
1490   // If object is not an array, bail out to regular call.
1491   if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
1492 
1493   Label miss;
1494 
1495   GenerateNameCheck(name, &miss);
1496 
1497   Register receiver = a1;
1498 
1499   // Get the receiver from the stack.
1500   const int argc = arguments().immediate();
1501   __ lw(receiver, MemOperand(sp, argc * kPointerSize));
1502 
1503   // Check that the receiver isn't a smi.
1504   __ JumpIfSmi(receiver, &miss);
1505 
1506   // Check that the maps haven't changed.
1507   CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, a3, v0, t0,
1508                   name, &miss);
1509 
1510   if (argc == 0) {
1511     // Nothing to do, just return the length.
1512     __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1513     __ Drop(argc + 1);
1514     __ Ret();
1515   } else {
1516     Label call_builtin;
1517     if (argc == 1) {  // Otherwise fall through to call the builtin.
1518       Label attempt_to_grow_elements;
1519 
1520       Register elements = t2;
1521       Register end_elements = t1;
1522       // Get the elements array of the object.
1523       __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
1524 
1525       // Check that the elements are in fast mode and writable.
1526       __ CheckMap(elements,
1527                   v0,
1528                   Heap::kFixedArrayMapRootIndex,
1529                   &call_builtin,
1530                   DONT_DO_SMI_CHECK);
1531 
1532       // Get the array's length into v0 and calculate new length.
1533       __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1534       STATIC_ASSERT(kSmiTagSize == 1);
1535       STATIC_ASSERT(kSmiTag == 0);
1536       __ Addu(v0, v0, Operand(Smi::FromInt(argc)));
1537 
1538       // Get the elements' length.
1539       __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
1540 
1541       // Check if we could survive without allocation.
1542       __ Branch(&attempt_to_grow_elements, gt, v0, Operand(t0));
1543 
1544       // Check if value is a smi.
1545       Label with_write_barrier;
1546       __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
1547       __ JumpIfNotSmi(t0, &with_write_barrier);
1548 
1549       // Save new length.
1550       __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1551 
1552       // Store the value.
1553       // We may need a register containing the end-of-elements address below,
1554       // so compute it into end_elements now.
1555       __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
1556       __ Addu(end_elements, elements, end_elements);
1557       const int kEndElementsOffset =
1558           FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
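      // A smi is the value shifted left by kSmiTagSize, so shifting it left
      // by (kPointerSizeLog2 - kSmiTagSize) yields length * kPointerSize in
      // bytes; kEndElementsOffset then rewinds past the argc slots being
      // appended, so end_elements points at the first slot to store into.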
1559       __ Addu(end_elements, end_elements, kEndElementsOffset);
1560       __ sw(t0, MemOperand(end_elements));
1561 
1562       // Done; the new length is in v0, so just return.
1563       __ Drop(argc + 1);
1564       __ Ret();
1565 
1566       __ bind(&with_write_barrier);
1567 
1568       __ lw(a3, FieldMemOperand(receiver, HeapObject::kMapOffset));
1569 
1570       if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
1571         Label fast_object, not_fast_object;
1572         __ CheckFastObjectElements(a3, t3, &not_fast_object);
1573         __ jmp(&fast_object);
1574         // In case of fast smi-only, convert to fast object, otherwise bail out.
1575         __ bind(&not_fast_object);
1576         __ CheckFastSmiOnlyElements(a3, t3, &call_builtin);
1577         // a1: receiver
1578         // a3: map
1579         __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
1580                                                FAST_ELEMENTS,
1581                                                a3,
1582                                                t3,
1583                                                &call_builtin);
1584         __ mov(a2, receiver);
1585         ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
1586         __ bind(&fast_object);
1587       } else {
1588         __ CheckFastObjectElements(a3, a3, &call_builtin);
1589       }
1590 
1591       // Save new length.
1592       __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1593 
1594       // Store the value.
1595       // We may need a register containing the end-of-elements address below,
1596       // so compute it into end_elements now.
1597       __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
1598       __ Addu(end_elements, elements, end_elements);
1599       __ Addu(end_elements, end_elements, kEndElementsOffset);
1600       __ sw(t0, MemOperand(end_elements));
1601 
1602       __ RecordWrite(elements,
1603                      end_elements,
1604                      t0,
1605                      kRAHasNotBeenSaved,
1606                      kDontSaveFPRegs,
1607                      EMIT_REMEMBERED_SET,
1608                      OMIT_SMI_CHECK);
1609       __ Drop(argc + 1);
1610       __ Ret();
1611 
1612       __ bind(&attempt_to_grow_elements);
1613       // v0: array's length + 1.
1614       // t0: elements' length.
1615 
1616       if (!FLAG_inline_new) {
1617         __ Branch(&call_builtin);
1618       }
1619 
1620       __ lw(a2, MemOperand(sp, (argc - 1) * kPointerSize));
1621       // Growing elements that are SMI-only requires special handling in case
1622       // the new element is non-Smi. For now, delegate to the builtin.
1623       Label no_fast_elements_check;
1624       __ JumpIfSmi(a2, &no_fast_elements_check);
1625       __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
1626       __ CheckFastObjectElements(t3, t3, &call_builtin);
1627       __ bind(&no_fast_elements_check);
1628 
1629       ExternalReference new_space_allocation_top =
1630           ExternalReference::new_space_allocation_top_address(
1631               masm()->isolate());
1632       ExternalReference new_space_allocation_limit =
1633           ExternalReference::new_space_allocation_limit_address(
1634               masm()->isolate());
1635 
1636       const int kAllocationDelta = 4;
1637       // Load top and check if it is the end of elements.
1638       __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
1639       __ Addu(end_elements, elements, end_elements);
1640       __ Addu(end_elements, end_elements, Operand(kEndElementsOffset));
1641       __ li(t3, Operand(new_space_allocation_top));
1642       __ lw(a3, MemOperand(t3));
1643       __ Branch(&call_builtin, ne, end_elements, Operand(a3));
1644 
1645       __ li(t5, Operand(new_space_allocation_limit));
1646       __ lw(t5, MemOperand(t5));
1647       __ Addu(a3, a3, Operand(kAllocationDelta * kPointerSize));
1648       __ Branch(&call_builtin, hi, a3, Operand(t5));
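      // Bump-pointer check: the elements array can grow in place only if it
      // is the most recent new-space allocation (the allocation top coincides
      // with its end) and kAllocationDelta more words fit below the limit.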
1649 
1650       // We fit and could grow elements.
1651       // Update new_space_allocation_top.
1652       __ sw(a3, MemOperand(t3));
1653       // Push the argument.
1654       __ sw(a2, MemOperand(end_elements));
1655       // Fill the rest with holes.
1656       __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
1657       for (int i = 1; i < kAllocationDelta; i++) {
1658         __ sw(a3, MemOperand(end_elements, i * kPointerSize));
1659       }
1660 
1661       // Update elements' and array's sizes.
1662       __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1663       __ Addu(t0, t0, Operand(Smi::FromInt(kAllocationDelta)));
1664       __ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
1665 
1666       // Elements are in new space, so write barrier is not required.
1667       __ Drop(argc + 1);
1668       __ Ret();
1669     }
1670     __ bind(&call_builtin);
1671     __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
1672                                                    masm()->isolate()),
1673                                  argc + 1,
1674                                  1);
1675   }
1676 
1677   // Handle call cache miss.
1678   __ bind(&miss);
1679   GenerateMissBranch();
1680 
1681   // Return the generated code.
1682   return GetCode(function);
1683 }
1684 
1685 
1686 Handle<Code> CallStubCompiler::CompileArrayPopCall(
1687     Handle<Object> object,
1688     Handle<JSObject> holder,
1689     Handle<JSGlobalPropertyCell> cell,
1690     Handle<JSFunction> function,
1691     Handle<String> name) {
1692   // ----------- S t a t e -------------
1693   //  -- a2    : name
1694   //  -- ra    : return address
1695   //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
1696   //  -- ...
1697   //  -- sp[argc * 4]           : receiver
1698   // -----------------------------------
1699 
1700   // If object is not an array, bail out to regular call.
1701   if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
1702 
1703   Label miss, return_undefined, call_builtin;
1704   Register receiver = a1;
1705   Register elements = a3;
1706   GenerateNameCheck(name, &miss);
1707 
1708   // Get the receiver from the stack.
1709   const int argc = arguments().immediate();
1710   __ lw(receiver, MemOperand(sp, argc * kPointerSize));
1711   // Check that the receiver isn't a smi.
1712   __ JumpIfSmi(receiver, &miss);
1713 
1714   // Check that the maps haven't changed.
1715   CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
1716                   t0, v0, name, &miss);
1717 
1718   // Get the elements array of the object.
1719   __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
1720 
1721   // Check that the elements are in fast mode and writable.
1722   __ CheckMap(elements,
1723               v0,
1724               Heap::kFixedArrayMapRootIndex,
1725               &call_builtin,
1726               DONT_DO_SMI_CHECK);
1727 
1728   // Get the array's length into t0 and calculate new length.
1729   __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1730   __ Subu(t0, t0, Operand(Smi::FromInt(1)));
1731   __ Branch(&return_undefined, lt, t0, Operand(zero_reg));
1732 
1733   // Get the last element.
1734   __ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
1735   STATIC_ASSERT(kSmiTagSize == 1);
1736   STATIC_ASSERT(kSmiTag == 0);
1737   // We can't address the last element in one operation. Compute the more
1738   // expensive shift first, and use an offset later on.
1739   __ sll(t1, t0, kPointerSizeLog2 - kSmiTagSize);
1740   __ Addu(elements, elements, t1);
1741   __ lw(v0, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
1742   __ Branch(&call_builtin, eq, v0, Operand(t2));
1743 
1744   // Set the array's length.
1745   __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
1746 
1747   // Fill with the hole.
1748   __ sw(t2, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
1749   __ Drop(argc + 1);
1750   __ Ret();
1751 
1752   __ bind(&return_undefined);
1753   __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
1754   __ Drop(argc + 1);
1755   __ Ret();
1756 
1757   __ bind(&call_builtin);
1758   __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop,
1759                                                  masm()->isolate()),
1760                                argc + 1,
1761                                1);
1762 
1763   // Handle call cache miss.
1764   __ bind(&miss);
1765   GenerateMissBranch();
1766 
1767   // Return the generated code.
1768   return GetCode(function);
1769 }
1770 
1771 
1772 Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
1773     Handle<Object> object,
1774     Handle<JSObject> holder,
1775     Handle<JSGlobalPropertyCell> cell,
1776     Handle<JSFunction> function,
1777     Handle<String> name) {
1778   // ----------- S t a t e -------------
1779   //  -- a2                     : function name
1780   //  -- ra                     : return address
1781   //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
1782   //  -- ...
1783   //  -- sp[argc * 4]           : receiver
1784   // -----------------------------------
1785 
1786   // If object is not a string, bail out to regular call.
1787   if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
1788 
1789   const int argc = arguments().immediate();
1790   Label miss;
1791   Label name_miss;
1792   Label index_out_of_range;
1793 
1794   Label* index_out_of_range_label = &index_out_of_range;
1795 
1796   if (kind_ == Code::CALL_IC &&
1797       (CallICBase::StringStubState::decode(extra_state_) ==
1798        DEFAULT_STRING_STUB)) {
1799     index_out_of_range_label = &miss;
1800   }
1801 
1802   GenerateNameCheck(name, &name_miss);
1803 
1804   // Check that the maps starting from the prototype haven't changed.
1805   GenerateDirectLoadGlobalFunctionPrototype(masm(),
1806                                             Context::STRING_FUNCTION_INDEX,
1807                                             v0,
1808                                             &miss);
1809   ASSERT(!object.is_identical_to(holder));
1810   CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
1811                   v0, holder, a1, a3, t0, name, &miss);
1812 
1813   Register receiver = a1;
1814   Register index = t1;
1815   Register result = v0;
1816   __ lw(receiver, MemOperand(sp, argc * kPointerSize));
1817   if (argc > 0) {
1818     __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
1819   } else {
1820     __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
1821   }
1822 
1823   StringCharCodeAtGenerator generator(receiver,
1824                                       index,
1825                                       result,
1826                                       &miss,  // When not a string.
1827                                       &miss,  // When not a number.
1828                                       index_out_of_range_label,
1829                                       STRING_INDEX_IS_NUMBER);
1830   generator.GenerateFast(masm());
1831   __ Drop(argc + 1);
1832   __ Ret();
1833 
1834   StubRuntimeCallHelper call_helper;
1835   generator.GenerateSlow(masm(), call_helper);
1836 
1837   if (index_out_of_range.is_linked()) {
1838     __ bind(&index_out_of_range);
1839     __ LoadRoot(v0, Heap::kNanValueRootIndex);
1840     __ Drop(argc + 1);
1841     __ Ret();
1842   }
1843 
1844   __ bind(&miss);
1845   // Restore function name in a2.
1846   __ li(a2, name);
1847   __ bind(&name_miss);
1848   GenerateMissBranch();
1849 
1850   // Return the generated code.
1851   return GetCode(function);
1852 }
1853 
1854 
1855 Handle<Code> CallStubCompiler::CompileStringCharAtCall(
1856     Handle<Object> object,
1857     Handle<JSObject> holder,
1858     Handle<JSGlobalPropertyCell> cell,
1859     Handle<JSFunction> function,
1860     Handle<String> name) {
1861   // ----------- S t a t e -------------
1862   //  -- a2                     : function name
1863   //  -- ra                     : return address
1864   //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
1865   //  -- ...
1866   //  -- sp[argc * 4]           : receiver
1867   // -----------------------------------
1868 
1869   // If object is not a string, bail out to regular call.
1870   if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
1871 
1872   const int argc = arguments().immediate();
1873   Label miss;
1874   Label name_miss;
1875   Label index_out_of_range;
1876   Label* index_out_of_range_label = &index_out_of_range;
1877   if (kind_ == Code::CALL_IC &&
1878       (CallICBase::StringStubState::decode(extra_state_) ==
1879        DEFAULT_STRING_STUB)) {
1880     index_out_of_range_label = &miss;
1881   }
1882   GenerateNameCheck(name, &name_miss);
1883 
1884   // Check that the maps starting from the prototype haven't changed.
1885   GenerateDirectLoadGlobalFunctionPrototype(masm(),
1886                                             Context::STRING_FUNCTION_INDEX,
1887                                             v0,
1888                                             &miss);
1889   ASSERT(!object.is_identical_to(holder));
1890   CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
1891                   v0, holder, a1, a3, t0, name, &miss);
1892 
1893   Register receiver = v0;
1894   Register index = t1;
1895   Register scratch = a3;
1896   Register result = v0;
1897   __ lw(receiver, MemOperand(sp, argc * kPointerSize));
1898   if (argc > 0) {
1899     __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
1900   } else {
1901     __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
1902   }
1903 
1904   StringCharAtGenerator generator(receiver,
1905                                   index,
1906                                   scratch,
1907                                   result,
1908                                   &miss,  // When not a string.
1909                                   &miss,  // When not a number.
1910                                   index_out_of_range_label,
1911                                   STRING_INDEX_IS_NUMBER);
1912   generator.GenerateFast(masm());
1913   __ Drop(argc + 1);
1914   __ Ret();
1915 
1916   StubRuntimeCallHelper call_helper;
1917   generator.GenerateSlow(masm(), call_helper);
1918 
1919   if (index_out_of_range.is_linked()) {
1920     __ bind(&index_out_of_range);
1921     __ LoadRoot(v0, Heap::kEmptyStringRootIndex);
1922     __ Drop(argc + 1);
1923     __ Ret();
1924   }
1925 
1926   __ bind(&miss);
1927   // Restore function name in a2.
1928   __ li(a2, name);
1929   __ bind(&name_miss);
1930   GenerateMissBranch();
1931 
1932   // Return the generated code.
1933   return GetCode(function);
1934 }
1935 
1936 
1937 Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
1938     Handle<Object> object,
1939     Handle<JSObject> holder,
1940     Handle<JSGlobalPropertyCell> cell,
1941     Handle<JSFunction> function,
1942     Handle<String> name) {
1943   // ----------- S t a t e -------------
1944   //  -- a2                     : function name
1945   //  -- ra                     : return address
1946   //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
1947   //  -- ...
1948   //  -- sp[argc * 4]           : receiver
1949   // -----------------------------------
1950 
1951   const int argc = arguments().immediate();
1952 
1953   // If the object is not a JSObject or we got an unexpected number of
1954   // arguments, bail out to the regular call.
1955   if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
1956 
1957   Label miss;
1958   GenerateNameCheck(name, &miss);
1959 
1960   if (cell.is_null()) {
1961     __ lw(a1, MemOperand(sp, 1 * kPointerSize));
1962 
1963     STATIC_ASSERT(kSmiTag == 0);
1964     __ JumpIfSmi(a1, &miss);
1965 
1966     CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, v0, a3, t0,
1967                     name, &miss);
1968   } else {
1969     ASSERT(cell->value() == *function);
1970     GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
1971                                 &miss);
1972     GenerateLoadFunctionFromCell(cell, function, &miss);
1973   }
1974 
1975   // Load the char code argument.
1976   Register code = a1;
1977   __ lw(code, MemOperand(sp, 0 * kPointerSize));
1978 
1979   // Check the code is a smi.
1980   Label slow;
1981   STATIC_ASSERT(kSmiTag == 0);
1982   __ JumpIfNotSmi(code, &slow);
1983 
1984   // Convert the smi code to uint16.
1985   __ And(code, code, Operand(Smi::FromInt(0xffff)));
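  // Smi::FromInt(0xffff) is 0xffff shifted left by kSmiTagSize, so the AND
  // truncates the untagged value to 16 bits while keeping the smi tag clear.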
1986 
1987   StringCharFromCodeGenerator generator(code, v0);
1988   generator.GenerateFast(masm());
1989   __ Drop(argc + 1);
1990   __ Ret();
1991 
1992   StubRuntimeCallHelper call_helper;
1993   generator.GenerateSlow(masm(), call_helper);
1994 
1995   // Tail call the full function. We do not have to patch the receiver
1996   // because the function makes no use of it.
1997   __ bind(&slow);
1998   __ InvokeFunction(
1999       function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
2000 
2001   __ bind(&miss);
2002   // a2: function name.
2003   GenerateMissBranch();
2004 
2005   // Return the generated code.
2006   return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
2007 }
2008 
2009 
2010 Handle<Code> CallStubCompiler::CompileMathFloorCall(
2011     Handle<Object> object,
2012     Handle<JSObject> holder,
2013     Handle<JSGlobalPropertyCell> cell,
2014     Handle<JSFunction> function,
2015     Handle<String> name) {
2016   // ----------- S t a t e -------------
2017   //  -- a2                     : function name
2018   //  -- ra                     : return address
2019   //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
2020   //  -- ...
2021   //  -- sp[argc * 4]           : receiver
2022   // -----------------------------------
2023 
2024   if (!CpuFeatures::IsSupported(FPU)) {
2025     return Handle<Code>::null();
2026   }
2027 
2028   CpuFeatures::Scope scope_fpu(FPU);
2029   const int argc = arguments().immediate();
2030   // If the object is not a JSObject or we got an unexpected number of
2031   // arguments, bail out to the regular call.
2032   if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
2033 
2034   Label miss, slow;
2035   GenerateNameCheck(name, &miss);
2036 
2037   if (cell.is_null()) {
2038     __ lw(a1, MemOperand(sp, 1 * kPointerSize));
2039     STATIC_ASSERT(kSmiTag == 0);
2040     __ JumpIfSmi(a1, &miss);
2041     CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0,
2042                     name, &miss);
2043   } else {
2044     ASSERT(cell->value() == *function);
2045     GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
2046                                 &miss);
2047     GenerateLoadFunctionFromCell(cell, function, &miss);
2048   }
2049 
2050   // Load the (only) argument into v0.
2051   __ lw(v0, MemOperand(sp, 0 * kPointerSize));
2052 
2053   // If the argument is a smi, just return.
2054   STATIC_ASSERT(kSmiTag == 0);
2055   __ And(t0, v0, Operand(kSmiTagMask));
2056   __ Drop(argc + 1, eq, t0, Operand(zero_reg));
2057   __ Ret(eq, t0, Operand(zero_reg));
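  // The conditional Drop and Ret above take effect only when t0 is zero,
  // i.e. when the smi tag is clear: a smi is already an integer, so
  // Math.floor is the identity on it.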
2058 
2059   __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
2060 
2061   Label wont_fit_smi, no_fpu_error, restore_fcsr_and_return;
2062 
2063   // FPU is guaranteed here (checked at entry), so use the floor instruction.
2064 
2065   // Load the HeapNumber value.
2066   __ ldc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2067 
2068   // Backup FCSR.
2069   __ cfc1(a3, FCSR);
2070   // Clearing FCSR clears the exception mask with no side-effects.
2071   __ ctc1(zero_reg, FCSR);
2072   // Convert the argument to an integer.
2073   __ floor_w_d(f0, f0);
2074 
2075   // Start checking for special cases.
2076   // Get the argument exponent and clear the sign bit.
2077   __ lw(t1, FieldMemOperand(v0, HeapNumber::kValueOffset + kPointerSize));
2078   __ And(t2, t1, Operand(~HeapNumber::kSignMask));
2079   __ srl(t2, t2, HeapNumber::kMantissaBitsInTopWord);
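  // The high word of an IEEE double is sign(1) | exponent(11) | mantissa(20).
  // With the sign bit cleared, shifting right by kMantissaBitsInTopWord
  // leaves the biased exponent in t2.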
2080 
2081   // Retrieve FCSR and check for fpu errors.
2082   __ cfc1(t5, FCSR);
2083   __ And(t5, t5, Operand(kFCSRExceptionFlagMask));
2084   __ Branch(&no_fpu_error, eq, t5, Operand(zero_reg));
2085 
2086   // Check for NaN, Infinity, and -Infinity.
2087   // They are invariant under Math.floor, so just
2088   // return the original argument.
2089   __ Subu(t3, t2, Operand(HeapNumber::kExponentMask
2090         >> HeapNumber::kMantissaBitsInTopWord));
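  // An all-ones biased exponent (kExponentMask >> kMantissaBitsInTopWord)
  // is used only by NaN and +/-Infinity, which floor maps to themselves.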
2091   __ Branch(&restore_fcsr_and_return, eq, t3, Operand(zero_reg));
2092   // We had an overflow or underflow in the conversion. Check if we
2093   // have a big exponent.
2094   // If greater or equal, the argument is already rounded and in v0.
2095   __ Branch(&restore_fcsr_and_return, ge, t3,
2096       Operand(HeapNumber::kMantissaBits));
2097   __ Branch(&wont_fit_smi);
2098 
2099   __ bind(&no_fpu_error);
2100   // Move the result back to v0.
2101   __ mfc1(v0, f0);
2102   // Check if the result fits into a smi.
2103   __ Addu(a1, v0, Operand(0x40000000));
2104   __ Branch(&wont_fit_smi, lt, a1, Operand(zero_reg));
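  // Adding 0x40000000 yields a negative result exactly when the integer is
  // outside the smi range [-2^30, 2^30 - 1], i.e. when it cannot be tagged.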
2105   // Tag the result.
2106   STATIC_ASSERT(kSmiTag == 0);
2107   __ sll(v0, v0, kSmiTagSize);
2108 
2109   // Check for -0.
2110   __ Branch(&restore_fcsr_and_return, ne, v0, Operand(zero_reg));
2111   // t1 already holds the HeapNumber exponent.
2112   __ And(t0, t1, Operand(HeapNumber::kSignMask));
2113   // If the HeapNumber is negative the result is -0, so reload the original
2114   // HeapNumber and return it.  Else v0 already holds smi 0, so just return.
2115   __ Branch(&restore_fcsr_and_return, eq, t0, Operand(zero_reg));
2116   __ lw(v0, MemOperand(sp, 0 * kPointerSize));
2117 
2118   __ bind(&restore_fcsr_and_return);
2119   // Restore FCSR and return.
2120   __ ctc1(a3, FCSR);
2121 
2122   __ Drop(argc + 1);
2123   __ Ret();
2124 
2125   __ bind(&wont_fit_smi);
2126   // Restore FCSR and fall through to the slow case.
2127   __ ctc1(a3, FCSR);
2128 
2129   __ bind(&slow);
2130   // Tail call the full function. We do not have to patch the receiver
2131   // because the function makes no use of it.
2132   __ InvokeFunction(
2133       function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
2134 
2135   __ bind(&miss);
2136   // a2: function name.
2137   GenerateMissBranch();
2138 
2139   // Return the generated code.
2140   return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
2141 }
2142 
2143 
2144 Handle<Code> CallStubCompiler::CompileMathAbsCall(
2145     Handle<Object> object,
2146     Handle<JSObject> holder,
2147     Handle<JSGlobalPropertyCell> cell,
2148     Handle<JSFunction> function,
2149     Handle<String> name) {
2150   // ----------- S t a t e -------------
2151   //  -- a2                     : function name
2152   //  -- ra                     : return address
2153   //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
2154   //  -- ...
2155   //  -- sp[argc * 4]           : receiver
2156   // -----------------------------------
2157 
2158   const int argc = arguments().immediate();
2159   // If the object is not a JSObject or we got an unexpected number of
2160   // arguments, bail out to the regular call.
2161   if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
2162 
2163   Label miss;
2164 
2165   GenerateNameCheck(name, &miss);
2166   if (cell.is_null()) {
2167     __ lw(a1, MemOperand(sp, 1 * kPointerSize));
2168     STATIC_ASSERT(kSmiTag == 0);
2169     __ JumpIfSmi(a1, &miss);
2170     CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, v0, a3, t0,
2171                     name, &miss);
2172   } else {
2173     ASSERT(cell->value() == *function);
2174     GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
2175                                 &miss);
2176     GenerateLoadFunctionFromCell(cell, function, &miss);
2177   }
2178 
2179   // Load the (only) argument into v0.
2180   __ lw(v0, MemOperand(sp, 0 * kPointerSize));
2181 
2182   // Check if the argument is a smi.
2183   Label not_smi;
2184   STATIC_ASSERT(kSmiTag == 0);
2185   __ JumpIfNotSmi(v0, &not_smi);
2186 
2187   // Do bitwise not or do nothing depending on the sign of the
2188   // argument.
2189   __ sra(t0, v0, kBitsPerInt - 1);
2190   __ Xor(a1, v0, t0);
2191 
2192   // Add 1 or do nothing depending on the sign of the argument.
2193   __ Subu(v0, a1, t0);
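  // Branch-free absolute value: t0 = v0 >> 31 is 0 for non-negative and -1
  // for negative inputs, so (v0 ^ t0) - t0 computes |v0|.  For the smi -5
  // (tagged -10): t0 = -1, a1 = 9, v0 = 10, i.e. the tagged smi 5.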
2194 
2195   // If the result is still negative, go to the slow case.
2196   // This only happens for the most negative smi.
2197   Label slow;
2198   __ Branch(&slow, lt, v0, Operand(zero_reg));
2199 
2200   // Smi case done.
2201   __ Drop(argc + 1);
2202   __ Ret();
2203 
2204   // Check if the argument is a heap number and load its exponent and
2205   // sign.
2206   __ bind(&not_smi);
2207   __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
2208   __ lw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
2209 
2210   // Check the sign of the argument. If the argument is positive,
2211   // just return it.
2212   Label negative_sign;
2213   __ And(t0, a1, Operand(HeapNumber::kSignMask));
2214   __ Branch(&negative_sign, ne, t0, Operand(zero_reg));
2215   __ Drop(argc + 1);
2216   __ Ret();
2217 
2218   // If the argument is negative, clear the sign, and return a new
2219   // number.
2220   __ bind(&negative_sign);
2221   __ Xor(a1, a1, Operand(HeapNumber::kSignMask));
2222   __ lw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
2223   __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
2224   __ AllocateHeapNumber(v0, t0, t1, t2, &slow);
2225   __ sw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
2226   __ sw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
2227   __ Drop(argc + 1);
2228   __ Ret();
2229 
2230   // Tail call the full function. We do not have to patch the receiver
2231   // because the function makes no use of it.
2232   __ bind(&slow);
2233   __ InvokeFunction(
2234       function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
2235 
2236   __ bind(&miss);
2237   // a2: function name.
2238   GenerateMissBranch();
2239 
2240   // Return the generated code.
2241   return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
2242 }
2243 
2244 
2245 Handle<Code> CallStubCompiler::CompileFastApiCall(
2246     const CallOptimization& optimization,
2247     Handle<Object> object,
2248     Handle<JSObject> holder,
2249     Handle<JSGlobalPropertyCell> cell,
2250     Handle<JSFunction> function,
2251     Handle<String> name) {
2252 
2253   Counters* counters = isolate()->counters();
2254 
2255   ASSERT(optimization.is_simple_api_call());
2256   // Bail out if the object is a global object, as we don't want to
2257   // repatch it to the global receiver.
2258   if (object->IsGlobalObject()) return Handle<Code>::null();
2259   if (!cell.is_null()) return Handle<Code>::null();
2260   if (!object->IsJSObject()) return Handle<Code>::null();
2261   int depth = optimization.GetPrototypeDepthOfExpectedType(
2262       Handle<JSObject>::cast(object), holder);
2263   if (depth == kInvalidProtoDepth) return Handle<Code>::null();
2264 
2265   Label miss, miss_before_stack_reserved;
2266 
2267   GenerateNameCheck(name, &miss_before_stack_reserved);
2268 
2269   // Get the receiver from the stack.
2270   const int argc = arguments().immediate();
2271   __ lw(a1, MemOperand(sp, argc * kPointerSize));
2272 
2273   // Check that the receiver isn't a smi.
2274   __ JumpIfSmi(a1, &miss_before_stack_reserved);
2275 
2276   __ IncrementCounter(counters->call_const(), 1, a0, a3);
2277   __ IncrementCounter(counters->call_const_fast_api(), 1, a0, a3);
2278 
2279   ReserveSpaceForFastApiCall(masm(), a0);
2280 
2281   // Check that the maps haven't changed and find the holder as a side effect.
2282   CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0, name,
2283                   depth, &miss);
2284 
2285   GenerateFastApiDirectCall(masm(), optimization, argc);
2286 
2287   __ bind(&miss);
2288   FreeSpaceForFastApiCall(masm());
2289 
2290   __ bind(&miss_before_stack_reserved);
2291   GenerateMissBranch();
2292 
2293   // Return the generated code.
2294   return GetCode(function);
2295 }
2296 
2297 
2298 Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
2299                                                    Handle<JSObject> holder,
2300                                                    Handle<JSFunction> function,
2301                                                    Handle<String> name,
2302                                                    CheckType check) {
2303   // ----------- S t a t e -------------
2304   //  -- a2    : name
2305   //  -- ra    : return address
2306   // -----------------------------------
2307   if (HasCustomCallGenerator(function)) {
2308     Handle<Code> code = CompileCustomCall(object, holder,
2309                                           Handle<JSGlobalPropertyCell>::null(),
2310                                           function, name);
2311     // A null handle means bail out to the regular compiler code below.
2312     if (!code.is_null()) return code;
2313   }
2314 
2315   Label miss;
2316 
2317   GenerateNameCheck(name, &miss);
2318 
2319   // Get the receiver from the stack.
2320   const int argc = arguments().immediate();
2321   __ lw(a1, MemOperand(sp, argc * kPointerSize));
2322 
2323   // Check that the receiver isn't a smi.
2324   if (check != NUMBER_CHECK) {
2325     __ JumpIfSmi(a1, &miss);
2326   }
2327 
2328   // Make sure that it's okay not to patch the on-stack receiver
2329   // unless we're doing a receiver map check.
2330   ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
2331   switch (check) {
2332     case RECEIVER_MAP_CHECK:
2333       __ IncrementCounter(masm()->isolate()->counters()->call_const(),
2334           1, a0, a3);
2335 
2336       // Check that the maps haven't changed.
2337       CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0,
2338                       name, &miss);
2339 
2340       // Patch the receiver on the stack with the global proxy if
2341       // necessary.
2342       if (object->IsGlobalObject()) {
2343         __ lw(a3, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
2344         __ sw(a3, MemOperand(sp, argc * kPointerSize));
2345       }
2346       break;
2347 
2348     case STRING_CHECK:
2349       if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
2350         // Check that the object is a string or a symbol.
2351         __ GetObjectType(a1, a3, a3);
2352         __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
2353         // Check that the maps starting from the prototype haven't changed.
2354         GenerateDirectLoadGlobalFunctionPrototype(
2355             masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
2356         CheckPrototypes(
2357             Handle<JSObject>(JSObject::cast(object->GetPrototype())),
2358             a0, holder, a3, a1, t0, name, &miss);
2359       } else {
2360         // Calling non-strict non-builtins with a value as the receiver
2361         // requires boxing.
2362         __ jmp(&miss);
2363       }
2364       break;
2365 
2366     case NUMBER_CHECK:
2367       if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
2368         Label fast;
2369         // Check that the object is a smi or a heap number.
2370         __ JumpIfSmi(a1, &fast);
2371         __ GetObjectType(a1, a0, a0);
2372         __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE));
2373         __ bind(&fast);
2374         // Check that the maps starting from the prototype haven't changed.
2375         GenerateDirectLoadGlobalFunctionPrototype(
2376             masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
2377         CheckPrototypes(
2378             Handle<JSObject>(JSObject::cast(object->GetPrototype())),
2379             a0, holder, a3, a1, t0, name, &miss);
2380       } else {
2381         // Calling non-strict non-builtins with a value as the receiver
2382         // requires boxing.
2383         __ jmp(&miss);
2384       }
2385       break;
2386 
2387     case BOOLEAN_CHECK:
2388       if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
2389         Label fast;
2390         // Check that the object is a boolean.
2391         __ LoadRoot(t0, Heap::kTrueValueRootIndex);
2392         __ Branch(&fast, eq, a1, Operand(t0));
2393         __ LoadRoot(t0, Heap::kFalseValueRootIndex);
2394         __ Branch(&miss, ne, a1, Operand(t0));
2395         __ bind(&fast);
2396         // Check that the maps starting from the prototype haven't changed.
2397         GenerateDirectLoadGlobalFunctionPrototype(
2398             masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
2399         CheckPrototypes(
2400             Handle<JSObject>(JSObject::cast(object->GetPrototype())),
2401             a0, holder, a3, a1, t0, name, &miss);
2402       } else {
2403         // Calling non-strict non-builtins with a value as the receiver
2404         // requires boxing.
2405         __ jmp(&miss);
2406       }
2407       break;
2408   }
2409 
2410   CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
2411       ? CALL_AS_FUNCTION
2412       : CALL_AS_METHOD;
2413   __ InvokeFunction(
2414       function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
2415 
2416   // Handle call cache miss.
2417   __ bind(&miss);
2418 
2419   GenerateMissBranch();
2420 
2421   // Return the generated code.
2422   return GetCode(function);
2423 }
2424 
2425 
2426 Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
2427                                                       Handle<JSObject> holder,
2428                                                       Handle<String> name) {
2429   // ----------- S t a t e -------------
2430   //  -- a2    : name
2431   //  -- ra    : return address
2432   // -----------------------------------
2433 
2434   Label miss;
2435 
2436   GenerateNameCheck(name, &miss);
2437 
2438   // Get the number of arguments.
2439   const int argc = arguments().immediate();
2440   LookupResult lookup(isolate());
2441   LookupPostInterceptor(holder, name, &lookup);
2442 
2443   // Get the receiver from the stack.
2444   __ lw(a1, MemOperand(sp, argc * kPointerSize));
2445 
2446   CallInterceptorCompiler compiler(this, arguments(), a2, extra_state_);
2447   compiler.Compile(masm(), object, holder, name, &lookup, a1, a3, t0, a0,
2448                    &miss);
2449 
2450   // Move returned value, the function to call, to a1.
2451   __ mov(a1, v0);
2452   // Restore receiver.
2453   __ lw(a0, MemOperand(sp, argc * kPointerSize));
2454 
2455   GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
2456 
2457   // Handle call cache miss.
2458   __ bind(&miss);
2459   GenerateMissBranch();
2460 
2461   // Return the generated code.
2462   return GetCode(INTERCEPTOR, name);
2463 }
2464 
2465 
2466 Handle<Code> CallStubCompiler::CompileCallGlobal(
2467     Handle<JSObject> object,
2468     Handle<GlobalObject> holder,
2469     Handle<JSGlobalPropertyCell> cell,
2470     Handle<JSFunction> function,
2471     Handle<String> name) {
2472   // ----------- S t a t e -------------
2473   //  -- a2    : name
2474   //  -- ra    : return address
2475   // -----------------------------------
2476 
2477   if (HasCustomCallGenerator(function)) {
2478     Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
2479     // A null handle means bail out to the regular compiler code below.
2480     if (!code.is_null()) return code;
2481   }
2482 
2483   Label miss;
2484   GenerateNameCheck(name, &miss);
2485 
2486   // Get the number of arguments.
2487   const int argc = arguments().immediate();
2488   GenerateGlobalReceiverCheck(object, holder, name, &miss);
2489   GenerateLoadFunctionFromCell(cell, function, &miss);
2490 
2491   // Patch the receiver on the stack with the global proxy if
2492   // necessary.
2493   if (object->IsGlobalObject()) {
2494     __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
2495     __ sw(a3, MemOperand(sp, argc * kPointerSize));
2496   }
2497 
2498   // Set up the context (function already in a1).
2499   __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
2500 
2501   // Jump to the cached code (tail call).
2502   Counters* counters = masm()->isolate()->counters();
2503   __ IncrementCounter(counters->call_global_inline(), 1, a3, t0);
2504   ParameterCount expected(function->shared()->formal_parameter_count());
2505   CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
2506       ? CALL_AS_FUNCTION
2507       : CALL_AS_METHOD;
2508   // We call indirectly through the code field in the function to
2509   // allow recompilation to take effect without changing any of the
2510   // call sites.
2511   __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
2512   __ InvokeCode(a3, expected, arguments(), JUMP_FUNCTION,
2513                 NullCallWrapper(), call_kind);
2514 
2515   // Handle call cache miss.
2516   __ bind(&miss);
2517   __ IncrementCounter(counters->call_global_inline_miss(), 1, a1, a3);
2518   GenerateMissBranch();
2519 
2520   // Return the generated code.
2521   return GetCode(NORMAL, name);
2522 }
2523 
2524 
2525 Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
2526                                                   int index,
2527                                                   Handle<Map> transition,
2528                                                   Handle<String> name) {
2529   // ----------- S t a t e -------------
2530   //  -- a0    : value
2531   //  -- a1    : receiver
2532   //  -- a2    : name
2533   //  -- ra    : return address
2534   // -----------------------------------
2535   Label miss;
2536 
2537   // Name register might be clobbered.
2538   GenerateStoreField(masm(), object, index, transition, a1, a2, a3, &miss);
2539   __ bind(&miss);
2540   __ li(a2, Operand(Handle<String>(name)));  // Restore name.
2541   Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
2542   __ Jump(ic, RelocInfo::CODE_TARGET);
2543 
2544   // Return the generated code.
2545   return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
2546 }
2547 
2548 
2549 Handle<Code> StoreStubCompiler::CompileStoreCallback(
2550     Handle<JSObject> object,
2551     Handle<AccessorInfo> callback,
2552     Handle<String> name) {
2553   // ----------- S t a t e -------------
2554   //  -- a0    : value
2555   //  -- a1    : receiver
2556   //  -- a2    : name
2557   //  -- ra    : return address
2558   // -----------------------------------
2559   Label miss;
2560 
2561   // Check that the map of the object hasn't changed.
2562   __ CheckMap(a1, a3, Handle<Map>(object->map()), &miss,
2563               DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
2564 
2565   // Perform global security token check if needed.
2566   if (object->IsJSGlobalProxy()) {
2567     __ CheckAccessGlobalProxy(a1, a3, &miss);
2568   }
2569 
2570   // Stub never generated for non-global objects that require access
2571   // checks.
2572   ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
2573 
2574   __ push(a1);  // Receiver.
2575   __ li(a3, Operand(callback));  // Callback info.
2576   __ Push(a3, a2, a0);
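  // (Sketch of the resulting stack, deepest first: receiver, callback info,
  // name, value. These are the four arguments consumed by
  // IC::kStoreCallbackProperty below; the "4, 1" in the tail call means
  // four arguments and a one-word result.)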

  // Do tail-call to the runtime system.
  ExternalReference store_callback_property =
      ExternalReference(IC_Utility(IC::kStoreCallbackProperty),
          masm()->isolate());
  __ TailCallExternalReference(store_callback_property, 4, 1);

  // Handle store cache miss.
  __ bind(&miss);
  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
  __ Jump(ic, RelocInfo::CODE_TARGET);

  // Return the generated code.
  return GetCode(CALLBACKS, name);
}


Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
    Handle<JSObject> receiver,
    Handle<String> name) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : receiver
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------
  Label miss;

  // Check that the map of the object hasn't changed.
  __ CheckMap(a1, a3, Handle<Map>(receiver->map()), &miss,
              DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);

  // Perform global security token check if needed.
  if (receiver->IsJSGlobalProxy()) {
    __ CheckAccessGlobalProxy(a1, a3, &miss);
  }

  // Stub is never generated for non-global objects that require access
  // checks.
  ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());

  __ Push(a1, a2, a0);  // Receiver, name, value.

  __ li(a0, Operand(Smi::FromInt(strict_mode_)));
  __ push(a0);  // Strict mode.
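  // (Sketch: IC::kStoreInterceptorProperty receives four arguments in stack
  // order receiver, name, value, strict-mode smi; passing the strict-mode
  // flag lets the runtime throw on a failed store in strict mode code
  // instead of failing silently.)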

  // Do tail-call to the runtime system.
  ExternalReference store_ic_property =
      ExternalReference(IC_Utility(IC::kStoreInterceptorProperty),
          masm()->isolate());
  __ TailCallExternalReference(store_ic_property, 4, 1);

  // Handle store cache miss.
  __ bind(&miss);
  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
  __ Jump(ic, RelocInfo::CODE_TARGET);

  // Return the generated code.
  return GetCode(INTERCEPTOR, name);
}


Handle<Code> StoreStubCompiler::CompileStoreGlobal(
    Handle<GlobalObject> object,
    Handle<JSGlobalPropertyCell> cell,
    Handle<String> name) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : receiver
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------
  Label miss;

  // Check that the map of the global has not changed.
  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
  __ Branch(&miss, ne, a3, Operand(Handle<Map>(object->map())));

  // Check that the value in the cell is not the hole. If it is, this
  // cell could have been deleted and reintroducing the global needs
  // to update the property details in the property dictionary of the
  // global object. We bail out to the runtime system to do that.
  __ li(t0, Operand(cell));
  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
  __ lw(t2, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
  __ Branch(&miss, eq, t1, Operand(t2));
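  // (Worked example of the hole case: after "delete x" the global's cell
  // holds the_hole rather than being removed; a later "x = 1" must rebuild
  // the property's details in the global object's dictionary, which only
  // the runtime can do, hence the miss.)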

  // Store the value in the cell.
  __ sw(a0, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
  __ mov(v0, a0);  // Stored value must be returned in v0.
  // Cells are always rescanned, so no write barrier here.

  Counters* counters = masm()->isolate()->counters();
  __ IncrementCounter(counters->named_store_global_inline(), 1, a1, a3);
  __ Ret();

  // Handle store cache miss.
  __ bind(&miss);
  __ IncrementCounter(counters->named_store_global_inline_miss(), 1, a1, a3);
  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
  __ Jump(ic, RelocInfo::CODE_TARGET);

  // Return the generated code.
  return GetCode(NORMAL, name);
}


Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
                                                      Handle<JSObject> object,
                                                      Handle<JSObject> last) {
  // ----------- S t a t e -------------
  //  -- a0    : receiver
  //  -- ra    : return address
  // -----------------------------------
  Label miss;

  // Check that the receiver is not a smi.
  __ JumpIfSmi(a0, &miss);

  // Check the maps of the full prototype chain.
  CheckPrototypes(object, a0, last, a3, a1, t0, name, &miss);

  // If the last object in the prototype chain is a global object,
  // check that the global property cell is empty.
  if (last->IsGlobalObject()) {
    GenerateCheckPropertyCell(
        masm(), Handle<GlobalObject>::cast(last), name, a1, &miss);
  }
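  // (Sketch: a global object stores properties in dictionary mode, so it
  // can gain a property without its map changing; the map checks above are
  // therefore not enough for the chain's last link, and the cell check
  // guards against the property appearing later.)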

  // Return undefined if the maps of the full prototype chain are still the
  // same.
  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
  __ Ret();

  __ bind(&miss);
  GenerateLoadMiss(masm(), Code::LOAD_IC);

  // Return the generated code.
  return GetCode(NONEXISTENT, factory()->empty_string());
}


Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
                                                Handle<JSObject> holder,
                                                int index,
                                                Handle<String> name) {
  // ----------- S t a t e -------------
  //  -- a0    : receiver
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------
  Label miss;

  __ mov(v0, a0);

  GenerateLoadField(object, holder, v0, a3, a1, t0, index, name, &miss);
  __ bind(&miss);
  GenerateLoadMiss(masm(), Code::LOAD_IC);

  // Return the generated code.
  return GetCode(FIELD, name);
}


Handle<Code> LoadStubCompiler::CompileLoadCallback(
    Handle<String> name,
    Handle<JSObject> object,
    Handle<JSObject> holder,
    Handle<AccessorInfo> callback) {
  // ----------- S t a t e -------------
  //  -- a0    : receiver
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------
  Label miss;
  GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0, callback, name,
                       &miss);
  __ bind(&miss);
  GenerateLoadMiss(masm(), Code::LOAD_IC);

  // Return the generated code.
  return GetCode(CALLBACKS, name);
}


Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
                                                   Handle<JSObject> holder,
                                                   Handle<JSFunction> value,
                                                   Handle<String> name) {
  // ----------- S t a t e -------------
  //  -- a0    : receiver
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------
  Label miss;

  GenerateLoadConstant(object, holder, a0, a3, a1, t0, value, name, &miss);
  __ bind(&miss);
  GenerateLoadMiss(masm(), Code::LOAD_IC);

  // Return the generated code.
  return GetCode(CONSTANT_FUNCTION, name);
}


Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
                                                      Handle<JSObject> holder,
                                                      Handle<String> name) {
  // ----------- S t a t e -------------
  //  -- a0    : receiver
  //  -- a2    : name
  //  -- ra    : return address
  //  -- [sp]  : receiver
  // -----------------------------------
  Label miss;

  LookupResult lookup(isolate());
  LookupPostInterceptor(holder, name, &lookup);
  GenerateLoadInterceptor(object, holder, &lookup, a0, a2, a3, a1, t0, name,
                          &miss);
  __ bind(&miss);
  GenerateLoadMiss(masm(), Code::LOAD_IC);

  // Return the generated code.
  return GetCode(INTERCEPTOR, name);
}


Handle<Code> LoadStubCompiler::CompileLoadGlobal(
    Handle<JSObject> object,
    Handle<GlobalObject> holder,
    Handle<JSGlobalPropertyCell> cell,
    Handle<String> name,
    bool is_dont_delete) {
  // ----------- S t a t e -------------
  //  -- a0    : receiver
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------
  Label miss;

  // Check that the map of the global has not changed.
  __ JumpIfSmi(a0, &miss);
  CheckPrototypes(object, a0, holder, a3, t0, a1, name, &miss);

  // Get the value from the cell.
  __ li(a3, Operand(cell));
  __ lw(t0, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));

  // Check for deleted property if property can actually be deleted.
  if (!is_dont_delete) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    __ Branch(&miss, eq, t0, Operand(at));
  }

  __ mov(v0, t0);
  Counters* counters = masm()->isolate()->counters();
  __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
  __ Ret();

  __ bind(&miss);
  __ IncrementCounter(counters->named_load_global_stub_miss(), 1, a1, a3);
  GenerateLoadMiss(masm(), Code::LOAD_IC);

  // Return the generated code.
  return GetCode(NORMAL, name);
}


Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
                                                     Handle<JSObject> receiver,
                                                     Handle<JSObject> holder,
                                                     int index) {
  // ----------- S t a t e -------------
  //  -- ra    : return address
  //  -- a0    : key
  //  -- a1    : receiver
  // -----------------------------------
  Label miss;

  // Check the key is the cached one.
  __ Branch(&miss, ne, a0, Operand(name));
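  // (Sketch: this is a raw pointer comparison against the cached name. It
  // suffices because property names are interned symbols, so equal strings
  // used as keys are the same heap object; any other key falls through to
  // the miss handler.)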

  GenerateLoadField(receiver, holder, a1, a2, a3, t0, index, name, &miss);
  __ bind(&miss);
  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);

  return GetCode(FIELD, name);
}


Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
    Handle<String> name,
    Handle<JSObject> receiver,
    Handle<JSObject> holder,
    Handle<AccessorInfo> callback) {
  // ----------- S t a t e -------------
  //  -- ra    : return address
  //  -- a0    : key
  //  -- a1    : receiver
  // -----------------------------------
  Label miss;

  // Check the key is the cached one.
  __ Branch(&miss, ne, a0, Operand(name));

  GenerateLoadCallback(receiver, holder, a1, a0, a2, a3, t0, callback, name,
                       &miss);
  __ bind(&miss);
  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);

  return GetCode(CALLBACKS, name);
}


Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
    Handle<String> name,
    Handle<JSObject> receiver,
    Handle<JSObject> holder,
    Handle<JSFunction> value) {
  // ----------- S t a t e -------------
  //  -- ra    : return address
  //  -- a0    : key
  //  -- a1    : receiver
  // -----------------------------------
  Label miss;

  // Check the key is the cached one.
  __ Branch(&miss, ne, a0, Operand(name));

  GenerateLoadConstant(receiver, holder, a1, a2, a3, t0, value, name, &miss);
  __ bind(&miss);
  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);

  // Return the generated code.
  return GetCode(CONSTANT_FUNCTION, name);
}


Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
    Handle<JSObject> receiver,
    Handle<JSObject> holder,
    Handle<String> name) {
  // ----------- S t a t e -------------
  //  -- ra    : return address
  //  -- a0    : key
  //  -- a1    : receiver
  // -----------------------------------
  Label miss;

  // Check the key is the cached one.
  __ Branch(&miss, ne, a0, Operand(name));

  LookupResult lookup(isolate());
  LookupPostInterceptor(holder, name, &lookup);
  GenerateLoadInterceptor(receiver, holder, &lookup, a1, a0, a2, a3, t0, name,
                          &miss);
  __ bind(&miss);
  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);

  return GetCode(INTERCEPTOR, name);
}


Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
    Handle<String> name) {
  // ----------- S t a t e -------------
  //  -- ra    : return address
  //  -- a0    : key
  //  -- a1    : receiver
  // -----------------------------------
  Label miss;

  // Check the key is the cached one.
  __ Branch(&miss, ne, a0, Operand(name));

  GenerateLoadArrayLength(masm(), a1, a2, &miss);
  __ bind(&miss);
  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);

  return GetCode(CALLBACKS, name);
}


Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
    Handle<String> name) {
  // ----------- S t a t e -------------
  //  -- ra    : return address
  //  -- a0    : key
  //  -- a1    : receiver
  // -----------------------------------
  Label miss;

  Counters* counters = masm()->isolate()->counters();
  __ IncrementCounter(counters->keyed_load_string_length(), 1, a2, a3);

  // Check the key is the cached one.
  __ Branch(&miss, ne, a0, Operand(name));

  GenerateLoadStringLength(masm(), a1, a2, a3, &miss, true);
  __ bind(&miss);
  __ DecrementCounter(counters->keyed_load_string_length(), 1, a2, a3);

  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);

  return GetCode(CALLBACKS, name);
}


Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
    Handle<String> name) {
  // ----------- S t a t e -------------
  //  -- ra    : return address
  //  -- a0    : key
  //  -- a1    : receiver
  // -----------------------------------
  Label miss;

  Counters* counters = masm()->isolate()->counters();
  __ IncrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);

  // Check the name hasn't changed.
  __ Branch(&miss, ne, a0, Operand(name));

  GenerateLoadFunctionPrototype(masm(), a1, a2, a3, &miss);
  __ bind(&miss);
  __ DecrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);

  return GetCode(CALLBACKS, name);
}


Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
    Handle<Map> receiver_map) {
  // ----------- S t a t e -------------
  //  -- ra    : return address
  //  -- a0    : key
  //  -- a1    : receiver
  // -----------------------------------
  ElementsKind elements_kind = receiver_map->elements_kind();
  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();

  __ DispatchMap(a1, a2, receiver_map, stub, DO_SMI_CHECK);

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
  __ Jump(ic, RelocInfo::CODE_TARGET);

  // Return the generated code.
  return GetCode(NORMAL, factory()->empty_string());
}


Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
    MapHandleList* receiver_maps,
    CodeHandleList* handler_ics) {
  // ----------- S t a t e -------------
  //  -- ra    : return address
  //  -- a0    : key
  //  -- a1    : receiver
  // -----------------------------------
  Label miss;
  __ JumpIfSmi(a1, &miss);

  int receiver_count = receiver_maps->length();
  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
  for (int current = 0; current < receiver_count; ++current) {
    __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET,
        eq, a2, Operand(receiver_maps->at(current)));
  }
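  // (Sketch of the emitted dispatch, in C-like pseudocode:
  //   map = receiver->map;
  //   if (map == maps[0]) goto handlers[0];
  //   ...
  //   if (map == maps[n-1]) goto handlers[n-1];
  //   goto miss;
  // i.e. a linear chain of compare-and-jump pairs, one per cached map.)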

  __ bind(&miss);
  Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss();
  __ Jump(miss_ic, RelocInfo::CODE_TARGET);

  // Return the generated code.
  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
}


Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
                                                       int index,
                                                       Handle<Map> transition,
                                                       Handle<String> name) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : key
  //  -- a2    : receiver
  //  -- ra    : return address
  // -----------------------------------

  Label miss;

  Counters* counters = masm()->isolate()->counters();
  __ IncrementCounter(counters->keyed_store_field(), 1, a3, t0);

  // Check that the name has not changed.
  __ Branch(&miss, ne, a1, Operand(name));

  // a3 is used as scratch register. a1 and a2 keep their values if a jump to
  // the miss label is generated.
  GenerateStoreField(masm(), object, index, transition, a2, a1, a3, &miss);
  __ bind(&miss);

  __ DecrementCounter(counters->keyed_store_field(), 1, a3, t0);
  Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
  __ Jump(ic, RelocInfo::CODE_TARGET);

  // Return the generated code.
  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
}


Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
    Handle<Map> receiver_map) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : key
  //  -- a2    : receiver
  //  -- ra    : return address
  //  -- a3    : scratch
  // -----------------------------------
  ElementsKind elements_kind = receiver_map->elements_kind();
  bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
  Handle<Code> stub =
      KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();

  __ DispatchMap(a2, a3, receiver_map, stub, DO_SMI_CHECK);

  Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
  __ Jump(ic, RelocInfo::CODE_TARGET);

  // Return the generated code.
  return GetCode(NORMAL, factory()->empty_string());
}


Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
    MapHandleList* receiver_maps,
    CodeHandleList* handler_stubs,
    MapHandleList* transitioned_maps) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : key
  //  -- a2    : receiver
  //  -- ra    : return address
  //  -- a3    : scratch
  // -----------------------------------
  Label miss;
  __ JumpIfSmi(a2, &miss);

  int receiver_count = receiver_maps->length();
  __ lw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  for (int i = 0; i < receiver_count; ++i) {
    if (transitioned_maps->at(i).is_null()) {
      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq,
          a3, Operand(receiver_maps->at(i)));
    } else {
      Label next_map;
      __ Branch(&next_map, ne, a3, Operand(receiver_maps->at(i)));
      __ li(a3, Operand(transitioned_maps->at(i)));
      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
      __ bind(&next_map);
    }
  }
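  // (Sketch: a non-null entry in transitioned_maps means the store first
  // needs an elements-kind transition, e.g. FAST_SMI_ONLY_ELEMENTS to
  // FAST_ELEMENTS when a heap object is stored into a smi-only array. The
  // target map is passed to the handler in a3, which is why a3 is listed
  // as scratch in the state comment above.)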

  __ bind(&miss);
  Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
  __ Jump(miss_ic, RelocInfo::CODE_TARGET);

  // Return the generated code.
  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
}


Handle<Code> ConstructStubCompiler::CompileConstructStub(
    Handle<JSFunction> function) {
  // a0    : argc
  // a1    : constructor
  // ra    : return address
  // [sp]  : last argument
  Label generic_stub_call;

  // Use t7 for holding undefined, which is used in several places below.
  __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);

#ifdef ENABLE_DEBUGGER_SUPPORT
  // Check to see whether there are any break points in the function code. If
  // there are, jump to the generic constructor stub which calls the actual
  // code for the function, thereby hitting the break points.
  __ lw(t5, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
  __ lw(a2, FieldMemOperand(t5, SharedFunctionInfo::kDebugInfoOffset));
  __ Branch(&generic_stub_call, ne, a2, Operand(t7));
#endif

  // Load the initial map and verify that it is in fact a map.
  // a1: constructor function
  // t7: undefined
  __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
  __ JumpIfSmi(a2, &generic_stub_call);
  __ GetObjectType(a2, a3, t0);
  __ Branch(&generic_stub_call, ne, t0, Operand(MAP_TYPE));

#ifdef DEBUG
  // Cannot construct functions this way.
  // a0: argc
  // a1: constructor function
  // a2: initial map
  // t7: undefined
  __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
  __ Check(ne, "Function constructed by construct stub.",
      a3, Operand(JS_FUNCTION_TYPE));
#endif

  // Now allocate the JSObject in new space.
  // a0: argc
  // a1: constructor function
  // a2: initial map
  // t7: undefined
  __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
  __ AllocateInNewSpace(a3, t4, t5, t6, &generic_stub_call, SIZE_IN_WORDS);

  // Allocated the JSObject, now initialize the fields. Map is set to the
  // initial map, and properties and elements are set to the empty fixed
  // array.
  // a0: argc
  // a1: constructor function
  // a2: initial map
  // a3: object size (in words)
  // t4: JSObject (not tagged)
  // t7: undefined
  __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
  __ mov(t5, t4);
  __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
  __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
  __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
  __ Addu(t5, t5, Operand(3 * kPointerSize));
  ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
  ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
  ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);

  // Calculate the location of the first argument. The stack contains only the
  // argc arguments.
  __ sll(a1, a0, kPointerSizeLog2);
  __ Addu(a1, a1, sp);
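  // (Sketch of the address arithmetic: a1 = sp + argc * kPointerSize now
  // points one slot above the first (leftmost) argument, since [sp] holds
  // the last argument. Argument n, zero-based, is then read below at
  // a1 - (n + 1) * kPointerSize.)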

  // Fill all the in-object properties with undefined.
  // a0: argc
  // a1: first argument
  // a3: object size (in words)
  // t4: JSObject (not tagged)
  // t5: First in-object property of JSObject (not tagged)
  // t7: undefined
  // Fill the initialized properties with a constant value or a passed argument
  // depending on the this.x = ...; assignment in the function.
  Handle<SharedFunctionInfo> shared(function->shared());
  for (int i = 0; i < shared->this_property_assignments_count(); i++) {
    if (shared->IsThisPropertyAssignmentArgument(i)) {
      Label not_passed, next;
      // Check if the argument assigned to the property is actually passed.
      int arg_number = shared->GetThisPropertyAssignmentArgument(i);
      __ Branch(&not_passed, less_equal, a0, Operand(arg_number));
      // Argument passed - find it on the stack.
      __ lw(a2, MemOperand(a1, (arg_number + 1) * -kPointerSize));
      __ sw(a2, MemOperand(t5));
      __ Addu(t5, t5, kPointerSize);
      __ jmp(&next);
      __ bind(&not_passed);
      // Set the property to undefined.
      __ sw(t7, MemOperand(t5));
      __ Addu(t5, t5, Operand(kPointerSize));
      __ bind(&next);
    } else {
      // Set the property to the constant value.
      Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
      __ li(a2, Operand(constant));
      __ sw(a2, MemOperand(t5));
      __ Addu(t5, t5, kPointerSize);
    }
  }
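  // (Sketch of the optimization at work: for a "simple" constructor whose
  // body consists only of assignments of the form this.x = arg_or_constant,
  // the parser records those assignments on the SharedFunctionInfo, and
  // this specialized stub can allocate the object and fill its fields
  // inline without entering the function's compiled code at all.)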

  // Fill the unused in-object property fields with undefined.
  ASSERT(function->has_initial_map());
  for (int i = shared->this_property_assignments_count();
       i < function->initial_map()->inobject_properties();
       i++) {
    __ sw(t7, MemOperand(t5));
    __ Addu(t5, t5, kPointerSize);
  }

  // a0: argc
  // t4: JSObject (not tagged)
  // Move argc to a1 and the JSObject to be returned into v0, then tag it.
  __ mov(a1, a0);
  __ mov(v0, t4);
  __ Or(v0, v0, Operand(kHeapObjectTag));

  // v0: JSObject
  // a1: argc
  // Remove caller arguments and receiver from the stack and return.
  __ sll(t0, a1, kPointerSizeLog2);
  __ Addu(sp, sp, t0);
  __ Addu(sp, sp, Operand(kPointerSize));
  Counters* counters = masm()->isolate()->counters();
  __ IncrementCounter(counters->constructed_objects(), 1, a1, a2);
  __ IncrementCounter(counters->constructed_objects_stub(), 1, a1, a2);
  __ Ret();

  // Jump to the generic stub in case the specialized code cannot handle the
  // construction.
  __ bind(&generic_stub_call);
  Handle<Code> generic_construct_stub =
      masm()->isolate()->builtins()->JSConstructStubGeneric();
  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);

  // Return the generated code.
  return GetCode();
}


#undef __
#define __ ACCESS_MASM(masm)


void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
    MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- ra     : return address
  //  -- a0     : key
  //  -- a1     : receiver
  // -----------------------------------
  Label slow, miss_force_generic;

  Register key = a0;
  Register receiver = a1;

  __ JumpIfNotSmi(key, &miss_force_generic);
  __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ sra(a2, a0, kSmiTagSize);
  __ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);
  __ Ret();

  // Slow case, key and receiver still in a0 and a1.
  __ bind(&slow);
  __ IncrementCounter(
      masm->isolate()->counters()->keyed_load_external_array_slow(),
      1, a2, a3);
  // Entry registers are intact.
  // ---------- S t a t e --------------
  //  -- ra     : return address
  //  -- a0     : key
  //  -- a1     : receiver
  // -----------------------------------
  Handle<Code> slow_ic =
      masm->isolate()->builtins()->KeyedLoadIC_Slow();
  __ Jump(slow_ic, RelocInfo::CODE_TARGET);

  // Miss case, call the runtime.
  __ bind(&miss_force_generic);

  // ---------- S t a t e --------------
  //  -- ra     : return address
  //  -- a0     : key
  //  -- a1     : receiver
  // -----------------------------------

  Handle<Code> miss_ic =
      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
}


static bool IsElementTypeSigned(ElementsKind elements_kind) {
  switch (elements_kind) {
    case EXTERNAL_BYTE_ELEMENTS:
    case EXTERNAL_SHORT_ELEMENTS:
    case EXTERNAL_INT_ELEMENTS:
      return true;

    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
    case EXTERNAL_PIXEL_ELEMENTS:
      return false;

    case EXTERNAL_FLOAT_ELEMENTS:
    case EXTERNAL_DOUBLE_ELEMENTS:
    case FAST_SMI_ONLY_ELEMENTS:
    case FAST_ELEMENTS:
    case FAST_DOUBLE_ELEMENTS:
    case DICTIONARY_ELEMENTS:
    case NON_STRICT_ARGUMENTS_ELEMENTS:
      UNREACHABLE();
      return false;
  }
  return false;
}


void KeyedLoadStubCompiler::GenerateLoadExternalArray(
    MacroAssembler* masm,
    ElementsKind elements_kind) {
  // ---------- S t a t e --------------
  //  -- ra     : return address
  //  -- a0     : key
  //  -- a1     : receiver
  // -----------------------------------
  Label miss_force_generic, slow, failed_allocation;

  Register key = a0;
  Register receiver = a1;

  // This stub is meant to be tail-jumped to; the receiver must already
  // have been verified by the caller not to be a smi.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &miss_force_generic);

  __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // a3: elements array

  // Check that the index is in range.
  __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
  __ sra(t2, key, kSmiTagSize);
  // Unsigned comparison catches both negative and too-large values.
  __ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));

  __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
  // a3: base pointer of external storage

  // We are not untagging the smi key but instead work with it
  // as if it was premultiplied by 2.
  STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
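  // (Worked example: a tagged smi key equals index * 2. For byte-sized
  // elements, "srl key, 1" recovers the index; for 16-bit elements the
  // tagged value is already the byte offset (index * 2); for 32-bit
  // elements, "sll key, 1" yields index * 4.)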

  Register value = a2;
  switch (elements_kind) {
    case EXTERNAL_BYTE_ELEMENTS:
      __ srl(t2, key, 1);
      __ addu(t3, a3, t2);
      __ lb(value, MemOperand(t3, 0));
      break;
    case EXTERNAL_PIXEL_ELEMENTS:
    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
      __ srl(t2, key, 1);
      __ addu(t3, a3, t2);
      __ lbu(value, MemOperand(t3, 0));
      break;
    case EXTERNAL_SHORT_ELEMENTS:
      __ addu(t3, a3, key);
      __ lh(value, MemOperand(t3, 0));
      break;
    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
      __ addu(t3, a3, key);
      __ lhu(value, MemOperand(t3, 0));
      break;
    case EXTERNAL_INT_ELEMENTS:
    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
      __ sll(t2, key, 1);
      __ addu(t3, a3, t2);
      __ lw(value, MemOperand(t3, 0));
      break;
    case EXTERNAL_FLOAT_ELEMENTS:
      __ sll(t3, t2, 2);
      __ addu(t3, a3, t3);
      if (CpuFeatures::IsSupported(FPU)) {
        CpuFeatures::Scope scope(FPU);
        __ lwc1(f0, MemOperand(t3, 0));
      } else {
        __ lw(value, MemOperand(t3, 0));
      }
      break;
    case EXTERNAL_DOUBLE_ELEMENTS:
      __ sll(t2, key, 2);
      __ addu(t3, a3, t2);
      if (CpuFeatures::IsSupported(FPU)) {
        CpuFeatures::Scope scope(FPU);
        __ ldc1(f0, MemOperand(t3, 0));
      } else {
        // t3: pointer to the beginning of the double we want to load.
        __ lw(a2, MemOperand(t3, 0));
        __ lw(a3, MemOperand(t3, Register::kSizeInBytes));
      }
      break;
    case FAST_ELEMENTS:
    case FAST_SMI_ONLY_ELEMENTS:
    case FAST_DOUBLE_ELEMENTS:
    case DICTIONARY_ELEMENTS:
    case NON_STRICT_ARGUMENTS_ELEMENTS:
      UNREACHABLE();
      break;
  }

  // For integer array types:
  // a2: value
  // For float array type:
  // f0: value (if FPU is supported)
  // a2: value (if FPU is not supported)
  // For double array type:
  // f0: value (if FPU is supported)
  // a2/a3: value (if FPU is not supported)

  if (elements_kind == EXTERNAL_INT_ELEMENTS) {
    // For the Int and UnsignedInt array types, we need to see whether
    // the value can be represented in a Smi. If not, we need to convert
    // it to a HeapNumber.
    Label box_int;
    __ Subu(t3, value, Operand(0xC0000000));  // Non-smi value gives neg result.
    __ Branch(&box_int, lt, t3, Operand(zero_reg));
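    // (Worked arithmetic: smis hold 31-bit signed values, i.e. the range
    // [-0x40000000, 0x3fffffff]. 0xC0000000 is -0x40000000 as a signed
    // word, so t3 = value + 0x40000000 modulo 2^32, which is non-negative
    // exactly when value lies in the smi range and wraps negative
    // otherwise.)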
    // Tag integer as smi and return it.
    __ sll(v0, value, kSmiTagSize);
    __ Ret();

    __ bind(&box_int);
    // Allocate a HeapNumber for the result and perform int-to-double
    // conversion.
    // The ARM version uses a temporary here to save r0, but we don't need to
    // (a0 is not modified).
    __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(v0, a3, t0, t1, &slow);

    if (CpuFeatures::IsSupported(FPU)) {
      CpuFeatures::Scope scope(FPU);
      __ mtc1(value, f0);
      __ cvt_d_w(f0, f0);
      __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));
      __ Ret();
    } else {
      Register dst1 = t2;
      Register dst2 = t3;
      FloatingPointHelper::Destination dest =
          FloatingPointHelper::kCoreRegisters;
      FloatingPointHelper::ConvertIntToDouble(masm,
                                              value,
                                              dest,
                                              f0,
                                              dst1,
                                              dst2,
                                              t1,
                                              f2);
      __ sw(dst1, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
      __ sw(dst2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
      __ Ret();
    }
  } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
    // The test is different for unsigned int values. Since we need
    // the value to be in the range of a positive smi, we can't
    // handle either of the top two bits being set in the value.
    if (CpuFeatures::IsSupported(FPU)) {
      CpuFeatures::Scope scope(FPU);
      Label pl_box_int;
      __ And(t2, value, Operand(0xC0000000));
      __ Branch(&pl_box_int, ne, t2, Operand(zero_reg));

      // It can fit in a Smi.
      // Tag integer as smi and return it.
      __ sll(v0, value, kSmiTagSize);
      __ Ret();

      __ bind(&pl_box_int);
      // Allocate a HeapNumber for the result and perform int-to-double
      // conversion. Don't use a0 and a1 as AllocateHeapNumber clobbers all
      // registers - also when jumping due to exhausted young space.
      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
      __ AllocateHeapNumber(v0, t2, t3, t6, &slow);

      // This is replaced by a macro:
      // __ mtc1(value, f0);     // LS 32-bits.
      // __ mtc1(zero_reg, f1);  // MS 32-bits are all zero.
      // __ cvt_d_l(f0, f0); // Use 64 bit conv to get correct unsigned 32-bit.

      __ Cvt_d_uw(f0, value, f22);

      __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));

      __ Ret();
    } else {
      // Check whether the unsigned integer fits into a smi.
      Label box_int_0, box_int_1, done;
      __ And(t2, value, Operand(0x80000000));
      __ Branch(&box_int_0, ne, t2, Operand(zero_reg));
      __ And(t2, value, Operand(0x40000000));
      __ Branch(&box_int_1, ne, t2, Operand(zero_reg));

      // Tag integer as smi and return it.
      __ sll(v0, value, kSmiTagSize);
      __ Ret();

      Register hiword = value;  // a2.
      Register loword = a3;

      __ bind(&box_int_0);
      // Integer does not have leading zeros.
      GenerateUInt2Double(masm, hiword, loword, t0, 0);
      __ Branch(&done);

      __ bind(&box_int_1);
      // Integer has one leading zero.
      GenerateUInt2Double(masm, hiword, loword, t0, 1);

      __ bind(&done);
      // Integer was converted to double in registers hiword:loword.
      // Wrap it into a HeapNumber. Don't use a0 and a1 as AllocateHeapNumber
      // clobbers all registers - also when jumping due to exhausted young
      // space.
      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
      __ AllocateHeapNumber(t2, t3, t5, t6, &slow);

      __ sw(hiword, FieldMemOperand(t2, HeapNumber::kExponentOffset));
      __ sw(loword, FieldMemOperand(t2, HeapNumber::kMantissaOffset));

      __ mov(v0, t2);
      __ Ret();
    }
  } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
    // For the floating-point array type, we need to always allocate a
    // HeapNumber.
    if (CpuFeatures::IsSupported(FPU)) {
      CpuFeatures::Scope scope(FPU);
      // Allocate a HeapNumber for the result. Don't use a0 and a1 as
      // AllocateHeapNumber clobbers all registers - also when jumping due to
      // exhausted young space.
      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
      __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
      // The float (single) value is already in fpu reg f0 (if we use float).
      __ cvt_d_s(f0, f0);
      __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));
      __ Ret();
    } else {
      // Allocate a HeapNumber for the result. Don't use a0 and a1 as
      // AllocateHeapNumber clobbers all registers - also when jumping due to
      // exhausted young space.
      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
      __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
      // FPU is not available, do manual single to double conversion.

      // a2: floating point value (binary32).
      // v0: heap number for result

      // Extract mantissa to t4.
      __ And(t4, value, Operand(kBinary32MantissaMask));

      // Extract exponent to t5.
      __ srl(t5, value, kBinary32MantissaBits);
      __ And(t5, t5, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));

      Label exponent_rebiased;
      __ Branch(&exponent_rebiased, eq, t5, Operand(zero_reg));

      __ li(t0, 0x7ff);
      __ Xor(t1, t5, Operand(0xFF));
      __ Movz(t5, t0, t1);  // Set t5 to 0x7ff only if t5 is equal to 0xff.
      __ Branch(&exponent_rebiased, eq, t1, Operand(zero_reg));

      // Rebias exponent.
      __ Addu(t5,
              t5,
              Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));

      __ bind(&exponent_rebiased);
      __ And(a2, value, Operand(kBinary32SignMask));
      value = no_reg;
      __ sll(t0, t5, HeapNumber::kMantissaBitsInTopWord);
      __ or_(a2, a2, t0);

      // Shift mantissa.
      static const int kMantissaShiftForHiWord =
          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;

      static const int kMantissaShiftForLoWord =
          kBitsPerInt - kMantissaShiftForHiWord;
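
      // (Worked layout, from the constants above: binary32 has 23 mantissa
      // bits while the HeapNumber's hi word keeps kMantissaBitsInTopWord =
      // 20 of the 52 double mantissa bits. The hi word therefore receives
      // mantissa >> 3 and the lo word the remaining 3 bits shifted up by
      // 32 - 3 = 29, which is exactly what the two shifts below compute.)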

      __ srl(t0, t4, kMantissaShiftForHiWord);
      __ or_(a2, a2, t0);
      __ sll(a0, t4, kMantissaShiftForLoWord);

      __ sw(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
      __ sw(a0, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
      __ Ret();
    }

  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    if (CpuFeatures::IsSupported(FPU)) {
      CpuFeatures::Scope scope(FPU);
      // Allocate a HeapNumber for the result. Don't use a0 and a1 as
      // AllocateHeapNumber clobbers all registers - also when jumping due to
      // exhausted young space.
      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
      __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
      // The double value is already in f0.
      __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
      __ Ret();
    } else {
      // Allocate a HeapNumber for the result. Don't use a0 and a1 as
      // AllocateHeapNumber clobbers all registers - also when jumping due to
      // exhausted young space.
      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
      __ AllocateHeapNumber(v0, t3, t5, t6, &slow);

      __ sw(a2, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
      __ sw(a3, FieldMemOperand(v0, HeapNumber::kExponentOffset));
      __ Ret();
    }

  } else {
    // Tag integer as smi and return it.
    __ sll(v0, value, kSmiTagSize);
    __ Ret();
  }

  // Slow case, key and receiver still in a0 and a1.
  __ bind(&slow);
  __ IncrementCounter(
      masm->isolate()->counters()->keyed_load_external_array_slow(),
      1, a2, a3);

  // ---------- S t a t e --------------
  //  -- ra     : return address
  //  -- a0     : key
  //  -- a1     : receiver
  // -----------------------------------

  __ Push(a1, a0);

  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);

  __ bind(&miss_force_generic);
  Handle<Code> stub =
      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
  __ Jump(stub, RelocInfo::CODE_TARGET);
}


void KeyedStoreStubCompiler::GenerateStoreExternalArray(
    MacroAssembler* masm,
    ElementsKind elements_kind) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------

  Label slow, check_heap_number, miss_force_generic;

  // Register usage.
  Register value = a0;
  Register key = a1;
  Register receiver = a2;
  // a3 mostly holds the elements array or the destination external array.

  // This stub is meant to be tail-jumped to; the receiver must already
  // have been verified by the caller not to be a smi.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &miss_force_generic);

  __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check that the index is in range.
  __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
  // Unsigned comparison catches both negative and too-large values.
  __ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));

  // Handle both smis and HeapNumbers in the fast path. Go to the
  // runtime for all other kinds of values.
  // a3: external array.

  if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
    // Double to pixel conversion is only implemented in the runtime for now.
    __ JumpIfNotSmi(value, &slow);
  } else {
    __ JumpIfNotSmi(value, &check_heap_number);
  }
  __ SmiUntag(t1, value);
  __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));

  // a3: base pointer of external storage.
  // t1: value (integer).

  switch (elements_kind) {
    case EXTERNAL_PIXEL_ELEMENTS: {
      // Clamp the value to [0..255].
      // v0 is used as a scratch register here.
      Label done;
      __ li(v0, Operand(255));
      // Normal branch: nop in delay slot.
      __ Branch(&done, gt, t1, Operand(v0));
      // Use delay slot in this branch.
      __ Branch(USE_DELAY_SLOT, &done, lt, t1, Operand(zero_reg));
      __ mov(v0, zero_reg);  // In delay slot.
      __ mov(v0, t1);  // Value is in range 0..255.
      __ bind(&done);
      __ mov(t1, v0);
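      // (Sketch of the clamp logic: v0 starts at 255; if t1 > 255 we jump
      // to done with v0 = 255. Otherwise the second branch tests t1 < 0 and
      // its delay slot, which executes whether or not that branch is taken,
      // sets v0 = 0; when the branch falls through, the following mov
      // overwrites v0 with the in-range t1. Either way v0 ends up holding
      // clamp(t1, 0, 255).)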

      __ srl(t8, key, 1);
      __ addu(t8, a3, t8);
      __ sb(t1, MemOperand(t8, 0));
      }
      break;
    case EXTERNAL_BYTE_ELEMENTS:
    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
      __ srl(t8, key, 1);
      __ addu(t8, a3, t8);
      __ sb(t1, MemOperand(t8, 0));
      break;
    case EXTERNAL_SHORT_ELEMENTS:
    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
      __ addu(t8, a3, key);
      __ sh(t1, MemOperand(t8, 0));
      break;
    case EXTERNAL_INT_ELEMENTS:
    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
      __ sll(t8, key, 1);
      __ addu(t8, a3, t8);
      __ sw(t1, MemOperand(t8, 0));
      break;
    case EXTERNAL_FLOAT_ELEMENTS:
      // Perform int-to-float conversion and store to memory.
      __ SmiUntag(t0, key);
      StoreIntAsFloat(masm, a3, t0, t1, t2, t3, t4);
      break;
    case EXTERNAL_DOUBLE_ELEMENTS:
      __ sll(t8, key, 2);
      __ addu(a3, a3, t8);
      // a3: effective address of the double element
      FloatingPointHelper::Destination destination;
      if (CpuFeatures::IsSupported(FPU)) {
        destination = FloatingPointHelper::kFPURegisters;
      } else {
        destination = FloatingPointHelper::kCoreRegisters;
      }
      FloatingPointHelper::ConvertIntToDouble(
          masm, t1, destination,
          f0, t2, t3,  // These are: double_dst, dst1, dst2.
          t0, f2);  // These are: scratch2, single_scratch.
      if (destination == FloatingPointHelper::kFPURegisters) {
        CpuFeatures::Scope scope(FPU);
        __ sdc1(f0, MemOperand(a3, 0));
      } else {
        __ sw(t2, MemOperand(a3, 0));
        __ sw(t3, MemOperand(a3, Register::kSizeInBytes));
      }
      break;
    case FAST_ELEMENTS:
    case FAST_SMI_ONLY_ELEMENTS:
    case FAST_DOUBLE_ELEMENTS:
    case DICTIONARY_ELEMENTS:
    case NON_STRICT_ARGUMENTS_ELEMENTS:
      UNREACHABLE();
      break;
  }

  // Entry registers are intact, a0 holds the value which is the return value.
  __ mov(v0, a0);
  __ Ret();

  if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
    // a3: external array.
    __ bind(&check_heap_number);
    __ GetObjectType(value, t1, t2);
    __ Branch(&slow, ne, t2, Operand(HEAP_NUMBER_TYPE));

    __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));

    // a3: base pointer of external storage.

    // The WebGL specification leaves the behavior of storing NaN and
    // +/-Infinity into integer arrays basically undefined. For more
    // reproducible behavior, convert these to zero.

    if (CpuFeatures::IsSupported(FPU)) {
      CpuFeatures::Scope scope(FPU);

      __ ldc1(f0, FieldMemOperand(a0, HeapNumber::kValueOffset));

      if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
        __ cvt_s_d(f0, f0);
        __ sll(t8, key, 1);
        __ addu(t8, a3, t8);
        __ swc1(f0, MemOperand(t8, 0));
      } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
        __ sll(t8, key, 2);
        __ addu(t8, a3, t8);
        __ sdc1(f0, MemOperand(t8, 0));
      } else {
        __ EmitECMATruncate(t3, f0, f2, t2, t1, t5);

        switch (elements_kind) {
          case EXTERNAL_BYTE_ELEMENTS:
          case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
            __ srl(t8, key, 1);
            __ addu(t8, a3, t8);
            __ sb(t3, MemOperand(t8, 0));
            break;
          case EXTERNAL_SHORT_ELEMENTS:
          case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
            __ addu(t8, a3, key);
            __ sh(t3, MemOperand(t8, 0));
            break;
          case EXTERNAL_INT_ELEMENTS:
          case EXTERNAL_UNSIGNED_INT_ELEMENTS:
            __ sll(t8, key, 1);
            __ addu(t8, a3, t8);
            __ sw(t3, MemOperand(t8, 0));
            break;
          case EXTERNAL_PIXEL_ELEMENTS:
          case EXTERNAL_FLOAT_ELEMENTS:
          case EXTERNAL_DOUBLE_ELEMENTS:
          case FAST_ELEMENTS:
          case FAST_SMI_ONLY_ELEMENTS:
          case FAST_DOUBLE_ELEMENTS:
          case DICTIONARY_ELEMENTS:
          case NON_STRICT_ARGUMENTS_ELEMENTS:
            UNREACHABLE();
            break;
        }
      }

      // Entry registers are intact, a0 holds the value
      // which is the return value.
      __ mov(v0, a0);
      __ Ret();
    } else {
      // FPU is not available, do manual conversions.

      __ lw(t3, FieldMemOperand(value, HeapNumber::kExponentOffset));
      __ lw(t4, FieldMemOperand(value, HeapNumber::kMantissaOffset));

      if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
        Label done, nan_or_infinity_or_zero;
        static const int kMantissaInHiWordShift =
            kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;

        static const int kMantissaInLoWordShift =
            kBitsPerInt - kMantissaInHiWordShift;

        // Test for all special exponent values: zeros, subnormal numbers, NaNs
        // and infinities. All these should be converted to 0.
        __ li(t5, HeapNumber::kExponentMask);
        __ and_(t6, t3, t5);
        __ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(zero_reg));

        __ xor_(t1, t6, t5);
        __ li(t2, kBinary32ExponentMask);
        __ Movz(t6, t2, t1);  // Only if t6 is equal to t5.
        __ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(t5));

        // Rebias exponent.
        __ srl(t6, t6, HeapNumber::kExponentShift);
        __ Addu(t6,
                t6,
                Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));

        __ li(t1, Operand(kBinary32MaxExponent));
        __ Slt(t1, t1, t6);
        __ And(t2, t3, Operand(HeapNumber::kSignMask));
        __ Or(t2, t2, Operand(kBinary32ExponentMask));
        __ Movn(t3, t2, t1);  // Only if t6 is gt kBinary32MaxExponent.
        __ Branch(&done, gt, t6, Operand(kBinary32MaxExponent));

        __ Slt(t1, t6, Operand(kBinary32MinExponent));
        __ And(t2, t3, Operand(HeapNumber::kSignMask));
        __ Movn(t3, t2, t1);  // Only if t6 is lt kBinary32MinExponent.
        __ Branch(&done, lt, t6, Operand(kBinary32MinExponent));

        __ And(t7, t3, Operand(HeapNumber::kSignMask));
        __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
        __ sll(t3, t3, kMantissaInHiWordShift);
        __ or_(t7, t7, t3);
        __ srl(t4, t4, kMantissaInLoWordShift);
        __ or_(t7, t7, t4);
        __ sll(t6, t6, kBinary32ExponentShift);
        __ or_(t3, t7, t6);

        __ bind(&done);
        __ sll(t9, key, 1);
        __ addu(t9, a3, t9);  // a3 holds the external storage base pointer.
        __ sw(t3, MemOperand(t9, 0));

        // Entry registers are intact, a0 holds the value which is the return
        // value.
        __ mov(v0, a0);
        __ Ret();

        __ bind(&nan_or_infinity_or_zero);
        __ And(t7, t3, Operand(HeapNumber::kSignMask));
        __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
        __ or_(t6, t6, t7);
        __ sll(t3, t3, kMantissaInHiWordShift);
        __ or_(t6, t6, t3);
        __ srl(t4, t4, kMantissaInLoWordShift);
        __ or_(t3, t6, t4);
        __ Branch(&done);
      } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
        __ sra(t0, key, kSmiTagSize);  // Untag the key; t0 is not set on
                                       // this path.
        __ sll(t8, t0, 3);
        __ addu(t8, a3, t8);
        // t8: effective address of destination element.
        __ sw(t4, MemOperand(t8, 0));
        __ sw(t3, MemOperand(t8, Register::kSizeInBytes));
        __ mov(v0, a0);
        __ Ret();
      } else {
        bool is_signed_type = IsElementTypeSigned(elements_kind);
        int meaningful_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
        int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
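
        // (Sketch: this branch hand-rolls the double-to-integer truncation
        // that EmitECMATruncate performs in the FPU path. meaningful_bits
        // is how many integer bits the element type can hold; exponents at
        // or above that limit mean the magnitude cannot be represented, and
        // the code below then stores min_value: 0x80000000 for signed
        // element types, 0 for unsigned ones.)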

        Label done, sign;

        // Test for all special exponent values: zeros, subnormal numbers, NaNs
        // and infinities. All these should be converted to 0.
        __ li(t5, HeapNumber::kExponentMask);
        __ and_(t6, t3, t5);
        __ Movz(t3, zero_reg, t6);  // Only if t6 is equal to zero.
        __ Branch(&done, eq, t6, Operand(zero_reg));

        __ xor_(t2, t6, t5);
        __ Movz(t3, zero_reg, t2);  // Only if t6 is equal to t5.
        __ Branch(&done, eq, t6, Operand(t5));

        // Unbias exponent.
        __ srl(t6, t6, HeapNumber::kExponentShift);
        __ Subu(t6, t6, Operand(HeapNumber::kExponentBias));
        // If the exponent is negative, the result is 0.
        __ slt(t2, t6, zero_reg);
        __ Movn(t3, zero_reg, t2);  // Only if exponent is negative.
        __ Branch(&done, lt, t6, Operand(zero_reg));

        // If the exponent is too big, the result is the minimal value.
        __ slti(t1, t6, meaningful_bits - 1);
        __ li(t2, min_value);
        __ Movz(t3, t2, t1);  // Only if t6 is ge meaningful_bits - 1.
        __ Branch(&done, ge, t6, Operand(meaningful_bits - 1));

        __ And(t5, t3, Operand(HeapNumber::kSignMask));
        __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
        __ Or(t3, t3, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
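        // The mantissa is stored without its implicit leading 1 bit;
        // or-ing in bit kMantissaBitsInTopWord (20) restores it, so t3 now
        // holds all of the value's significant high-word bits.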

        __ li(t9, HeapNumber::kMantissaBitsInTopWord);
        __ subu(t6, t9, t6);
        __ slt(t1, t6, zero_reg);
        __ srlv(t2, t3, t6);
        __ Movz(t3, t2, t1);  // Only if t6 is non-negative.
        __ Branch(&sign, ge, t6, Operand(zero_reg));

        __ subu(t6, zero_reg, t6);
        __ sllv(t3, t3, t6);
        __ li(t9, meaningful_bits);
        __ subu(t6, t9, t6);
        __ srlv(t4, t4, t6);
        __ or_(t3, t3, t4);
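        // The two shift cases above, as a C sketch (exp is the unbiased
        // exponent, hi the mantissa word with its implicit bit restored, lo
        // the low mantissa word; the names are illustrative only):
        //   if (exp <= kMantissaBitsInTopWord) {  // i.e. exp <= 20
        //     result = hi >> (20 - exp);
        //   } else {
        //     result = (hi << (exp - 20)) |
        //              (lo >> (meaningful_bits - (exp - 20)));
        //   }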

        __ bind(&sign);
        __ subu(t2, zero_reg, t3);  // t2 = -t3.
        __ Movn(t3, t2, t5);  // Negate only if t5 (the sign bit) is set.

        __ bind(&done);

        // Result is in t3.
        // This switch block should be exactly the same as above (FPU mode).
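        // The key is a smi, i.e. the element index shifted left by one, so it
        // already equals the byte offset for 2-byte elements; shift it right
        // once for 1-byte elements and left once more for 4-byte elements.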
        switch (elements_kind) {
          case EXTERNAL_BYTE_ELEMENTS:
          case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
            __ srl(t8, key, 1);
            __ addu(t8, a3, t8);
            __ sb(t3, MemOperand(t8, 0));
            break;
          case EXTERNAL_SHORT_ELEMENTS:
          case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
            __ addu(t8, a3, key);
            __ sh(t3, MemOperand(t8, 0));
            break;
          case EXTERNAL_INT_ELEMENTS:
          case EXTERNAL_UNSIGNED_INT_ELEMENTS:
            __ sll(t8, key, 1);
            __ addu(t8, a3, t8);
            __ sw(t3, MemOperand(t8, 0));
            break;
          case EXTERNAL_PIXEL_ELEMENTS:
          case EXTERNAL_FLOAT_ELEMENTS:
          case EXTERNAL_DOUBLE_ELEMENTS:
          case FAST_ELEMENTS:
          case FAST_SMI_ONLY_ELEMENTS:
          case FAST_DOUBLE_ELEMENTS:
          case DICTIONARY_ELEMENTS:
          case NON_STRICT_ARGUMENTS_ELEMENTS:
            UNREACHABLE();
            break;
        }
      }
    }
  }

  // Slow case: the value, key and receiver are still in a0, a1 and a2.
  __ bind(&slow);
  __ IncrementCounter(
      masm->isolate()->counters()->keyed_load_external_array_slow(),
      1, t0, t1);
  // Entry registers are intact.
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------
  Handle<Code> slow_ic =
      masm->isolate()->builtins()->KeyedStoreIC_Slow();
  __ Jump(slow_ic, RelocInfo::CODE_TARGET);

  // Miss case, call the runtime.
  __ bind(&miss_force_generic);

  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------

  Handle<Code> miss_ic =
      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
}


void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- ra    : return address
  //  -- a0    : key
  //  -- a1    : receiver
  // -----------------------------------
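  // What the fast path below does, as an illustrative sketch (not V8 API):
  //   if (!IsSmi(key)) miss;
  //   elements = receiver->elements;
  //   if ((uint32_t)key >= (uint32_t)elements->length) miss;
  //   value = elements[key];
  //   if (value == the_hole) miss;  // Holes fall back to the generic IC.
  //   return value;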
  Label miss_force_generic;

  // This stub is meant to be tail-jumped to; the receiver must already
  // have been verified by the caller to not be a smi.

  // Check that the key is a smi.
  __ JumpIfNotSmi(a0, &miss_force_generic, at, USE_DELAY_SLOT);
  // The delay slot can be safely used here; a1 is an object pointer.

  // Get the elements array.
  __ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
  __ AssertFastElements(a2);

  // Check that the key is within bounds.
  __ lw(a3, FieldMemOperand(a2, FixedArray::kLengthOffset));
  __ Branch(USE_DELAY_SLOT, &miss_force_generic, hs, a0, Operand(a3));

  // Load the result and make sure it's not the hole.
  __ Addu(a3, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(t0, t0, a3);
  __ lw(t0, MemOperand(t0));
  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
  __ Branch(&miss_force_generic, eq, t0, Operand(t1));
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, t0);

  __ bind(&miss_force_generic);
  Handle<Code> stub =
      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
  __ Jump(stub, RelocInfo::CODE_TARGET);
}


void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
    MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- ra    : return address
  //  -- a0    : key
  //  -- a1    : receiver
  // -----------------------------------
  Label miss_force_generic, slow_allocate_heapnumber;

  Register key_reg = a0;
  Register receiver_reg = a1;
  Register elements_reg = a2;
  Register heap_number_reg = a2;
  Register indexed_double_offset = a3;
  Register scratch = t0;
  Register scratch2 = t1;
  Register scratch3 = t2;
  Register heap_number_map = t3;

  // This stub is meant to be tail-jumped to; the receiver must already
  // have been verified by the caller to not be a smi.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key_reg, &miss_force_generic);

  // Get the elements array.
  __ lw(elements_reg,
        FieldMemOperand(receiver_reg, JSObject::kElementsOffset));

  // Check that the key is within bounds.
  __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
  __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));

  // Load the upper word of the double in the fixed array and test for NaN.
  __ sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
  __ Addu(indexed_double_offset, elements_reg, Operand(scratch2));
  uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
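  // On a little-endian target the upper word of each double sits
  // sizeof(kHoleNanLower32) == 4 bytes past the start of the element. The
  // hole is encoded as a NaN whose upper word is kHoleNanUpper32, and no
  // other value stored in a FixedDoubleArray uses that upper word, so
  // comparing the upper 32 bits alone is enough to detect it.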
  __ lw(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
  __ Branch(&miss_force_generic, eq, scratch, Operand(kHoleNanUpper32));

  // Not the hole NaN. Allocate a new heap number and copy the double value
  // into it.
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
                        heap_number_map, &slow_allocate_heapnumber);

  // Don't need to reload the upper 32 bits of the double; it's already in
  // scratch.
  __ sw(scratch, FieldMemOperand(heap_number_reg,
                                 HeapNumber::kExponentOffset));
  __ lw(scratch, FieldMemOperand(indexed_double_offset,
                                 FixedArray::kHeaderSize));
  __ sw(scratch, FieldMemOperand(heap_number_reg,
                                 HeapNumber::kMantissaOffset));

  __ mov(v0, heap_number_reg);
  __ Ret();

  __ bind(&slow_allocate_heapnumber);
  Handle<Code> slow_ic =
      masm->isolate()->builtins()->KeyedLoadIC_Slow();
  __ Jump(slow_ic, RelocInfo::CODE_TARGET);

  __ bind(&miss_force_generic);
  Handle<Code> miss_ic =
      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
}


void KeyedStoreStubCompiler::GenerateStoreFastElement(
    MacroAssembler* masm,
    bool is_js_array,
    ElementsKind elements_kind,
    KeyedAccessGrowMode grow_mode) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : key
  //  -- a2    : receiver
  //  -- ra    : return address
  //  -- a3    : scratch (elements)
  //  -- t0    : scratch
  // -----------------------------------
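  // The fast path below, as an illustrative sketch (not V8 API):
  //   if (!IsSmi(key)) miss;
  //   if (kind == FAST_SMI_ONLY_ELEMENTS && !IsSmi(value)) transition;
  //   if (key >= length) grow or miss;
  //   elements[key] = value;  // Plus a write barrier for FAST_ELEMENTS.
  //   return value;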
  Label miss_force_generic, transition_elements_kind, grow, slow;
  Label finish_store, check_capacity;

  Register value_reg = a0;
  Register key_reg = a1;
  Register receiver_reg = a2;
  Register scratch = t0;
  Register elements_reg = a3;
  Register length_reg = t1;
  Register scratch2 = t2;

  // This stub is meant to be tail-jumped to; the receiver must already
  // have been verified by the caller to not be a smi.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key_reg, &miss_force_generic);

  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
    __ JumpIfNotSmi(value_reg, &transition_elements_kind);
  }

  // Check that the key is within bounds.
  __ lw(elements_reg,
        FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
  if (is_js_array) {
    __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
  } else {
    __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
  }
  // Compare smis; an unsigned compare catches both negative and
  // out-of-bounds indexes.
  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
    __ Branch(&grow, hs, key_reg, Operand(scratch));
  } else {
    __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
  }

  // Make sure elements is a fast element array, not a copy-on-write (COW)
  // array.
  __ CheckMap(elements_reg,
              scratch,
              Heap::kFixedArrayMapRootIndex,
              &miss_force_generic,
              DONT_DO_SMI_CHECK);

  __ bind(&finish_store);

  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
    __ Addu(scratch,
            elements_reg,
            Operand(FixedArray::kHeaderSize - kHeapObjectTag));
    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
    __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
    __ Addu(scratch, scratch, scratch2);
    __ sw(value_reg, MemOperand(scratch));
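    // A smi is a tagged immediate, not a heap pointer, so storing it needs
    // no write barrier and RecordWrite can be skipped on this path.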
  } else {
    ASSERT(elements_kind == FAST_ELEMENTS);
    __ Addu(scratch,
            elements_reg,
            Operand(FixedArray::kHeaderSize - kHeapObjectTag));
    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
    __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
    __ Addu(scratch, scratch, scratch2);
    __ sw(value_reg, MemOperand(scratch));
    __ mov(receiver_reg, value_reg);
    __ RecordWrite(elements_reg,  // Object.
                   scratch,       // Address.
                   receiver_reg,  // Value.
                   kRAHasNotBeenSaved,
                   kDontSaveFPRegs);
  }
  // value_reg (a0) is preserved.
  // Done.
  __ Ret();

  __ bind(&miss_force_generic);
  Handle<Code> ic =
      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
  __ Jump(ic, RelocInfo::CODE_TARGET);

  __ bind(&transition_elements_kind);
  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
  __ Jump(ic_miss, RelocInfo::CODE_TARGET);

  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
    // Grow the array by a single element if possible.
    __ bind(&grow);

    // Make sure the array is only growing by a single element; anything else
    // must be handled by the runtime.
    __ Branch(&miss_force_generic, ne, key_reg, Operand(scratch));

    // Check for the empty array, and preallocate a small backing store if
    // possible.
    __ lw(length_reg,
          FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
    __ lw(elements_reg,
          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
    __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
    __ Branch(&check_capacity, ne, elements_reg, Operand(at));

    int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
    __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow,
                          TAG_OBJECT);

    __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
    __ sw(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
    __ li(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
    __ sw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
    for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
      __ sw(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
    }
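    // Slot 0 receives the incoming value just below; slots
    // 1 .. kPreallocatedArrayElements-1 were filled with the hole above so
    // that reads of the unused tail still miss to the generic IC.
    // (FixedArray::SizeFor(i) is the offset of element i, i.e. the size of
    // an array with i elements.)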

    // Store the element at index zero.
    __ sw(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));

    // Install the new backing store in the JSArray.
    __ sw(elements_reg,
          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
    __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
                        scratch, kRAHasNotBeenSaved, kDontSaveFPRegs,
                        EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

    // The array was empty; set its length to 1.
    __ li(length_reg, Operand(Smi::FromInt(1)));
    __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
    __ Ret();

    __ bind(&check_capacity);
    // Check for COW elements; in general they are not handled by this stub.
    __ CheckMap(elements_reg,
                scratch,
                Heap::kFixedCOWArrayMapRootIndex,
                &miss_force_generic,
                DONT_DO_SMI_CHECK);

    __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
    __ Branch(&slow, hs, length_reg, Operand(scratch));

    // Grow the array and finish the store.
    __ Addu(length_reg, length_reg, Operand(Smi::FromInt(1)));
    __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
    __ jmp(&finish_store);

    __ bind(&slow);
    Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
    __ Jump(ic_slow, RelocInfo::CODE_TARGET);
  }
}


void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
    MacroAssembler* masm,
    bool is_js_array,
    KeyedAccessGrowMode grow_mode) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : key
  //  -- a2    : receiver
  //  -- ra    : return address
  //  -- a3    : scratch (elements_reg)
  //  -- t0    : scratch1
  //  -- t1    : scratch2 (mantissa_reg)
  //  -- t2    : scratch3 (exponent_reg)
  //  -- t3    : scratch4 / length_reg
  // -----------------------------------
  Label miss_force_generic, transition_elements_kind, grow, slow;
  Label finish_store, check_capacity;

  Register value_reg = a0;
  Register key_reg = a1;
  Register receiver_reg = a2;
  Register elements_reg = a3;
  Register scratch1 = t0;
  Register scratch2 = t1;
  Register scratch3 = t2;
  Register scratch4 = t3;
  Register length_reg = t3;

  // This stub is meant to be tail-jumped to; the receiver must already
  // have been verified by the caller to not be a smi.
  __ JumpIfNotSmi(key_reg, &miss_force_generic);

  __ lw(elements_reg,
        FieldMemOperand(receiver_reg, JSObject::kElementsOffset));

  // Check that the key is within bounds.
  if (is_js_array) {
    __ lw(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
  } else {
    __ lw(scratch1,
          FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
  }
  // Compare smis; an unsigned compare catches both negative and
  // out-of-bounds indexes.
  if (grow_mode == ALLOW_JSARRAY_GROWTH) {
    __ Branch(&grow, hs, key_reg, Operand(scratch1));
  } else {
    __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch1));
  }

  __ bind(&finish_store);

  __ StoreNumberToDoubleElements(value_reg,
                                 key_reg,
                                 receiver_reg,
                                 elements_reg,
                                 scratch1,
                                 scratch2,
                                 scratch3,
                                 scratch4,
                                 &transition_elements_kind);
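  // StoreNumberToDoubleElements unboxes the value (a smi or a heap number)
  // into the raw double slot at elements[key], canonicalizing NaNs so the
  // hole bit pattern cannot be forged by a stored NaN; anything that is not
  // a number takes the transition_elements_kind exit instead.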

  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, value_reg);  // In delay slot.

  // Handle store cache miss, replacing the IC with the generic stub.
  __ bind(&miss_force_generic);
  Handle<Code> ic =
      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
  __ Jump(ic, RelocInfo::CODE_TARGET);

  __ bind(&transition_elements_kind);
  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
  __ Jump(ic_miss, RelocInfo::CODE_TARGET);

  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
    // Grow the array by a single element if possible.
    __ bind(&grow);

    // Make sure the array is only growing by a single element; anything else
    // must be handled by the runtime.
    __ Branch(&miss_force_generic, ne, key_reg, Operand(scratch1));

    // Transition on values that can't be stored in a FixedDoubleArray.
    Label value_is_smi;
    __ JumpIfSmi(value_reg, &value_is_smi);
    __ lw(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    __ Branch(&transition_elements_kind, ne, scratch1, Operand(at));
    __ bind(&value_is_smi);

    // Check for the empty array, and preallocate a small backing store if
    // possible.
    __ lw(length_reg,
          FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
    __ lw(elements_reg,
          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
    __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
    __ Branch(&check_capacity, ne, elements_reg, Operand(at));

    int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
    __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
                          TAG_OBJECT);

    // Initialize the new FixedDoubleArray. Leave elements uninitialized for
    // efficiency; they are guaranteed to be initialized before use.
    __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
    __ sw(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
    __ li(scratch1, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
    __ sw(scratch1,
          FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));

    // Install the new backing store in the JSArray.
    __ sw(elements_reg,
          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
    __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
                        scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs,
                        EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

    // The array was empty; set its length to 1 and reload the elements
    // pointer, which RecordWriteField may have clobbered.
    __ li(length_reg, Operand(Smi::FromInt(1)));
    __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
    __ lw(elements_reg,
          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
    __ jmp(&finish_store);

    __ bind(&check_capacity);
    // Make sure that the backing store can hold additional elements.
    __ lw(scratch1,
          FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
    __ Branch(&slow, hs, length_reg, Operand(scratch1));

    // Grow the array and finish the store.
    __ Addu(length_reg, length_reg, Operand(Smi::FromInt(1)));
    __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
    __ jmp(&finish_store);

    __ bind(&slow);
    Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
    __ Jump(ic_slow, RelocInfo::CODE_TARGET);
  }
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS