// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_PPC

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)
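// The "__" shorthand routes each assembler mnemonic below through the
// MacroAssembler argument; ACCESS_MASM(masm) expands to masm->.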

// Helper function used from LoadIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done. Can be the same as elements or name, clobbering
//           one of these in the case of not jumping to the miss label.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry, check that the value is a normal
  // property.
  __ bind(&done);  // scratch2 == elements + 4 * index
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
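  // Each dictionary entry is a (key, value, details) triple, which is why the
  // details word sits two pointer slots past the entry address left in
  // scratch2 and the value one slot past it.  scratch2 is parked in r0 while
  // it temporarily holds the Smi-encoded type mask.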
  __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ mr(r0, scratch2);
  __ LoadSmiLiteral(scratch2, Smi::FromInt(PropertyDetails::TypeField::kMask));
  __ and_(scratch2, scratch1, scratch2, SetRC);
  __ bne(miss, cr0);
  __ mr(scratch2, r0);

  // Get the value at the masked, scaled index and return.
  __ LoadP(result,
           FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store.
// The two scratch registers need to be different from elements, name and
// value.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
                                    Register elements, Register name,
                                    Register value, Register scratch1,
                                    Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry in the dictionary, check that the value
  // is a normal property that is not read-only.
  __ bind(&done);  // scratch2 == elements + 4 * index
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  int kTypeAndReadOnlyMask =
      PropertyDetails::TypeField::kMask |
      PropertyDetails::AttributesField::encode(READ_ONLY);
  __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ mr(r0, scratch2);
  __ LoadSmiLiteral(scratch2, Smi::FromInt(kTypeAndReadOnlyMask));
  __ and_(scratch2, scratch1, scratch2, SetRC);
  __ bne(miss, cr0);
  __ mr(scratch2, r0);

  // Store the value at the masked, scaled index and return.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
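  // Materialize the untagged address of the value slot: RecordWrite below
  // needs the raw slot address in a register, not a tagged field operand.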
  __ addi(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ StoreP(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ mr(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
                 kDontSaveFPRegs);
}

void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Register dictionary = r3;
  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));

  Label slow;

  __ LoadP(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
                                       JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary,
                         LoadDescriptor::NameRegister(), r3, r6, r7);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ bind(&slow);
  GenerateRuntimeGetProperty(masm);
}


// A register that isn't one of the parameters to the load ic.
static const Register LoadIC_TempRegister() { return r6; }


static void LoadIC_PushArgs(MacroAssembler* masm) {
  Register receiver = LoadDescriptor::ReceiverRegister();
  Register name = LoadDescriptor::NameRegister();
  Register slot = LoadDescriptor::SlotRegister();
  Register vector = LoadWithVectorDescriptor::VectorRegister();

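  // Push in the order the Runtime_LoadIC_Miss/Runtime_KeyedLoadIC_Miss entry
  // points read their arguments: receiver, name, slot, vector.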
  __ Push(receiver, name, slot, vector);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, r7, r8);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kLoadIC_Miss);
}

void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in lr.

  __ mr(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kGetProperty);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, r7, r8);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}

void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in lr.

  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kKeyedGetProperty);
}

static void StoreIC_PushArgs(MacroAssembler* masm) {
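  // Argument order matches what the Runtime_StoreIC_Miss family of entry
  // points expects: value, slot, vector, receiver, name.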
  __ Push(StoreWithVectorDescriptor::ValueRegister(),
          StoreWithVectorDescriptor::SlotRegister(),
          StoreWithVectorDescriptor::VectorRegister(),
          StoreWithVectorDescriptor::ReceiverRegister(),
          StoreWithVectorDescriptor::NameRegister());
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}

void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}

static void KeyedStoreGenerateMegamorphicHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch = r7;
  Register address = r8;
  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
                     scratch, address));

  if (check_map == kCheckMap) {
    __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ mov(scratch, Operand(masm->isolate()->factory()->fixed_array_map()));
    __ cmp(elements_map, scratch);
    __ bne(fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed1;
  __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(scratch, key);
  __ LoadPX(scratch, MemOperand(address, scratch));
  __ Cmpi(scratch, Operand(masm->isolate()->factory()->the_hole_value()), r0);
  __ bne(&holecheck_passed1);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0);
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(scratch, key);
  __ StorePX(value, MemOperand(address, scratch));
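  // No write barrier is needed for a smi store: smis are encoded immediates,
  // not heap pointers.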
  __ Ret();

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0);
  }
  __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(scratch, key);
  __ StorePUX(value, MemOperand(address, scratch));
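  // The update form (StorePUX) leaves 'address' pointing at the slot just
  // written, which is exactly what the write barrier below needs.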
  // Update write barrier for the elements array address.
  __ mr(scratch, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret();

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
    __ bne(slow);
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so,
  // go to the runtime.
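  // The hole NaN is recognizable from its upper 32 bits alone, so a 32-bit
  // load of the exponent word is sufficient.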
  __ addi(address, elements,
          Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
                   kHeapObjectTag)));
  __ SmiToDoubleArrayOffset(scratch, key);
  __ lwzx(scratch, MemOperand(address, scratch));
  __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
  __ bne(&fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0);
  }
  __ Ret();

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
  __ bne(&non_double_value);

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ b(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ b(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ b(&finish_object_store);
}


void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       LanguageMode language_mode) {
  // ---------- S t a t e --------------
  //  -- r3     : value
  //  -- r4     : key
  //  -- r5     : receiver
  //  -- lr     : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array, maybe_name_key, miss;

  // Register usage.
  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(receiver.is(r4));
  DCHECK(key.is(r5));
  DCHECK(value.is(r3));
  Register receiver_map = r6;
  Register elements_map = r9;
  Register elements = r10;  // Elements array of the receiver.
  // r7 and r8 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &maybe_name_key);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks.
  // The generic stub does not perform map checks.
  __ lbz(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ andi(r0, ip, Operand(1 << Map::kIsAccessCheckNeeded));
  __ bne(&slow, cr0);
  // Check if the object is a JS array or not.
  __ lbz(r7, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ cmpi(r7, Operand(JS_ARRAY_TYPE));
  __ beq(&array);
  // Check that the object is some kind of JSObject.
  __ cmpi(r7, Operand(FIRST_JS_OBJECT_TYPE));
  __ blt(&slow);

  // Object case: Check key against length in the elements array.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ LoadP(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmpl(key, ip);
  __ blt(&fast_object);
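  // Fall through: out-of-bounds stores on a non-array JSObject go straight to
  // the runtime; only JSArrays grow via the extra-capacity path below.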

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // r3: value.
  // r4: key.
  // r5: receiver.
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
  // Never returns to here.

  __ bind(&maybe_name_key);
  __ LoadP(r7, FieldMemOperand(key, HeapObject::kMapOffset));
  __ lbz(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(r7, &slow);

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = StoreWithVectorDescriptor::VectorRegister();
  Register slot = StoreWithVectorDescriptor::SlotRegister();
  DCHECK(!AreAliased(vector, slot, r8, r9, r10, r11));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));

  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, r8,
                                                     r9, r10, r11);
  // Cache miss.
  __ b(&miss);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // Condition code from comparing key and array length is still available.
  __ bne(&slow);  // Only support writing to array[array.length].
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ LoadP(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmpl(key, ip);
  __ bge(&slow);
  __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ mov(ip, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ cmp(elements_map, ip);  // ip is safe to re-use after the bounds check.
  __ bne(&check_if_double_array);
  __ b(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ mov(ip, Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ cmp(elements_map, ip);  // ip re-used again here.
  __ bne(&slow);
  __ b(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
  __ bind(&array);
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ LoadP(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ cmpl(key, ip);
  __ bge(&extra);

  KeyedStoreGenerateMegamorphicHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength, value, key, receiver,
                                      receiver_map, elements_map, elements);
  __ bind(&miss);
  GenerateMiss(masm);
}

void StoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kStoreIC_Miss);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  Register dictionary = r8;
  DCHECK(receiver.is(r4));
  DCHECK(name.is(r5));
  DCHECK(value.is(r3));
  DCHECK(StoreWithVectorDescriptor::VectorRegister().is(r6));
  DCHECK(StoreWithVectorDescriptor::SlotRegister().is(r7));

  __ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, r9, r10);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->ic_store_normal_hit(), 1, r9, r10);
  __ Ret();

  __ bind(&miss);
  __ IncrementCounter(counters->ic_store_normal_miss(), 1, r9, r10);
  GenerateMiss(masm);
}


#undef __

Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}

bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address cmp_instruction_address =
      Assembler::return_address_from_call_start(address);

  // If the instruction following the call is not a cmp rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(cmp_instruction_address);
  return Assembler::IsCmpImmediate(instr);
}


//
// This code is paired with the JumpPatchSite class in full-codegen-ppc.cc
//
void PatchInlinedSmiCode(Isolate* isolate, Address address,
                         InlinedSmiCheck check) {
  Address cmp_instruction_address =
      Assembler::return_address_from_call_start(address);

  // If the instruction following the call is not a cmp rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(cmp_instruction_address);
  if (!Assembler::IsCmpImmediate(instr)) {
    return;
  }

  // The delta to the start of the map check instruction and the condition
  // code used at the patched jump.
  int delta = Assembler::GetCmpImmediateRawImmediate(instr);
  delta += Assembler::GetCmpImmediateRegister(instr).code() * kOff16Mask;
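  // The register number of the cmpi and its 16-bit immediate together encode
  // the delta, so distances beyond the reach of a single 16-bit immediate can
  // be represented.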
  // If the delta is 0 the instruction is cmp r0, #0 which also signals that
  // nothing was inlined.
  if (delta == 0) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[  patching ic at %p, cmp=%p, delta=%d\n",
           static_cast<void*>(address),
           static_cast<void*>(cmp_instruction_address), delta);
  }

  Address patch_address =
      cmp_instruction_address - delta * Instruction::kInstrSize;
  Instr instr_at_patch = Assembler::instr_at(patch_address);
  Instr branch_instr =
      Assembler::instr_at(patch_address + Instruction::kInstrSize);
  // This is patching a conditional "jump if not smi/jump if smi" site.
  // Enabling by changing from
  //   cmp cr0, rx, rx
  // to
  //   rlwinm(r0, value, 0, 31, 31, SetRC);
  //   bc(label, BT/BF, 2)
  // and vice-versa to be disabled again.
  CodePatcher patcher(isolate, patch_address, 2);
  Register reg = Assembler::GetRA(instr_at_patch);
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(Assembler::IsCmpRegister(instr_at_patch));
    DCHECK_EQ(Assembler::GetRA(instr_at_patch).code(),
              Assembler::GetRB(instr_at_patch).code());
    patcher.masm()->TestIfSmi(reg, r0);
  } else {
    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
    DCHECK(Assembler::IsAndi(instr_at_patch));
    patcher.masm()->cmp(reg, reg, cr0);
  }
  DCHECK(Assembler::IsBranch(branch_instr));

  // Invert the logic of the branch.
  if (Assembler::GetCondition(branch_instr) == eq) {
    patcher.EmitCondition(ne);
  } else {
    DCHECK(Assembler::GetCondition(branch_instr) == ne);
    patcher.EmitCondition(eq);
  }
}
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_PPC