// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM

#include "src/codegen.h"
#include "src/macro-assembler.h"
#include "src/arm/simulator-arm.h"

namespace v8 {
namespace internal {


#define __ masm.

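// Under the simulator, generated machine code cannot be called through a
// plain C++ function pointer, so the exp stub is invoked via the simulator's
// CallFPReturnsDouble helper instead.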
#if defined(USE_SIMULATOR)
byte* fast_exp_arm_machine_code = NULL;
double fast_exp_simulator(double x) {
  return Simulator::current(Isolate::Current())->CallFPReturnsDouble(
      fast_exp_arm_machine_code, x, 0);
}
#endif


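// Generates an optimized exp() stub when --fast_math is enabled; otherwise
// (or if the executable buffer cannot be allocated) std::exp is returned.
// The result is a plain function pointer, so a caller could do, e.g.:
//   UnaryMathFunction fast_exp = CreateExpFunction();
//   double y = fast_exp(1.0);  // ~2.71828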
UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &std::exp;
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  {
    DwVfpRegister input = d0;
    DwVfpRegister result = d1;
    DwVfpRegister double_scratch1 = d2;
    DwVfpRegister double_scratch2 = d3;
    Register temp1 = r4;
    Register temp2 = r5;
    Register temp3 = r6;

    if (masm.use_eabi_hardfloat()) {
      // Input value is in d0 anyway, nothing to do.
    } else {
      __ vmov(input, r0, r1);
    }
    __ Push(temp3, temp2, temp1);
    MathExpGenerator::EmitMathExp(
        &masm, input, result, double_scratch1, double_scratch2,
        temp1, temp2, temp3);
    __ Pop(temp3, temp2, temp1);
    if (masm.use_eabi_hardfloat()) {
      __ vmov(d0, result);
    } else {
      __ vmov(r0, r1, result);
    }
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);

#if !defined(USE_SIMULATOR)
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
  fast_exp_arm_machine_code = buffer;
  return &fast_exp_simulator;
#endif
}

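// The memcpy stubs below replace the portable fallback (passed in as 'stub')
// with hand-tuned ARM code. They are only generated on real ARM hardware
// that supports unaligned accesses; otherwise the fallback is returned
// unchanged.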
#if defined(V8_HOST_ARCH_ARM)
MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return stub;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  Register dest = r0;
  Register src = r1;
  Register chars = r2;
  Register temp1 = r3;
  Label less_4;

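  // NEON fast path: prefetch the source ahead of the copy, then dispatch on
  // size so that large copies run through a 64-bytes-per-iteration loop of
  // quad-register vld1/vst1 transfers while smaller tails fall through
  // progressively narrower blocks (128, 64, 32, 16, 8 bytes).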
  if (CpuFeatures::IsSupported(NEON)) {
    Label loop, less_256, less_128, less_64, less_32, _16_or_less, _8_or_less;
    Label size_less_than_8;
    __ pld(MemOperand(src, 0));

    __ cmp(chars, Operand(8));
    __ b(lt, &size_less_than_8);
    __ cmp(chars, Operand(32));
    __ b(lt, &less_32);
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 32));
    }
    __ cmp(chars, Operand(64));
    __ b(lt, &less_64);
    __ pld(MemOperand(src, 64));
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 96));
    }
    __ cmp(chars, Operand(128));
    __ b(lt, &less_128);
    __ pld(MemOperand(src, 128));
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 160));
    }
    __ pld(MemOperand(src, 192));
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 224));
    }
    __ cmp(chars, Operand(256));
    __ b(lt, &less_256);
    __ sub(chars, chars, Operand(256));

    __ bind(&loop);
    __ pld(MemOperand(src, 256));
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 256));
    }
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(64), SetCC);
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ b(ge, &loop);
    __ add(chars, chars, Operand(256));

    __ bind(&less_256);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(128));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ cmp(chars, Operand(64));
    __ b(lt, &less_64);

    __ bind(&less_128);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(64));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));

    __ bind(&less_64);
    __ cmp(chars, Operand(32));
    __ b(lt, &less_32);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(32));

    __ bind(&less_32);
    __ cmp(chars, Operand(16));
    __ b(le, &_16_or_less);
    __ vld1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(16));

    __ bind(&_16_or_less);
    __ cmp(chars, Operand(8));
    __ b(le, &_8_or_less);
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(8));

    // Do a last copy which may overlap with the previous copy (up to 8 bytes).
    __ bind(&_8_or_less);
    __ rsb(chars, chars, Operand(8));
    __ sub(src, src, Operand(chars));
    __ sub(dest, dest, Operand(chars));
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
    __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest));

    __ Ret();

    __ bind(&size_less_than_8);

    __ bic(temp1, chars, Operand(0x3), SetCC);
    __ b(&less_4, eq);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ str(temp1, MemOperand(dest, 4, PostIndex));
  } else {
    Register temp2 = ip;
    Label loop;

    __ bic(temp2, chars, Operand(0x3), SetCC);
    __ b(&less_4, eq);
    __ add(temp2, dest, temp2);

    __ bind(&loop);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ str(temp1, MemOperand(dest, 4, PostIndex));
    __ cmp(dest, temp2);
    __ b(&loop, ne);
  }

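  // At most 3 bytes remain. LSL #31 moves bit 1 of 'chars' into the carry
  // flag and leaves a non-zero result exactly when bit 0 is set, so the
  // halfword copy below is predicated on cs and the final byte copy on ne.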
  __ bind(&less_4);
  __ mov(chars, Operand(chars, LSL, 31), SetCC);
  // bit0 => Z (ne), bit1 => C (cs)
  __ ldrh(temp1, MemOperand(src, 2, PostIndex), cs);
  __ strh(temp1, MemOperand(dest, 2, PostIndex), cs);
  __ ldrb(temp1, MemOperand(src), ne);
  __ strb(temp1, MemOperand(dest), ne);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}


// Convert 8 to 16. The number of characters to copy must be at least 8.
MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
    MemCopyUint16Uint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return stub;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  Register dest = r0;
  Register src = r1;
  Register chars = r2;
  if (CpuFeatures::IsSupported(NEON)) {
    Register temp = r3;
    Label loop;

    __ bic(temp, chars, Operand(0x7));
    __ sub(chars, chars, Operand(temp));
    __ add(temp, dest, Operand(temp, LSL, 1));

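    // Main loop: load 8 bytes, zero-extend each to a halfword with vmovl,
    // and store the resulting 16 bytes, until 'dest' reaches 'temp'.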
    __ bind(&loop);
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
    __ vmovl(NeonU8, q0, d0);
    __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
    __ cmp(dest, temp);
    __ b(&loop, ne);

    // Do a last copy which will overlap with the previous copy (1 to 8 bytes).
    __ rsb(chars, chars, Operand(8));
    __ sub(src, src, Operand(chars));
    __ sub(dest, dest, Operand(chars, LSL, 1));
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
    __ vmovl(NeonU8, q0, d0);
    __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest));
    __ Ret();
  } else {
    Register temp1 = r3;
    Register temp2 = ip;
    Register temp3 = lr;
    Register temp4 = r4;
    Label loop;
    Label not_two;

    __ Push(lr, r4);
    __ bic(temp2, chars, Operand(0x3));
    __ add(temp2, dest, Operand(temp2, LSL, 1));

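    // Widen 4 bytes per iteration: uxtb16 splits the even and odd bytes into
    // two half-populated words, and pkhbt/pkhtb repack them as four
    // zero-extended halfwords across two output words.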
    __ bind(&loop);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ uxtb16(temp3, Operand(temp1, ROR, 0));
    __ uxtb16(temp4, Operand(temp1, ROR, 8));
    __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
    __ str(temp1, MemOperand(dest));
    __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
    __ str(temp1, MemOperand(dest, 4));
    __ add(dest, dest, Operand(8));
    __ cmp(dest, temp2);
    __ b(&loop, ne);

    __ mov(chars, Operand(chars, LSL, 31), SetCC);  // bit0 => ne, bit1 => cs
    __ b(&not_two, cc);
    __ ldrh(temp1, MemOperand(src, 2, PostIndex));
    __ uxtb(temp3, Operand(temp1, ROR, 8));
    __ mov(temp3, Operand(temp3, LSL, 16));
    __ uxtab(temp3, temp3, Operand(temp1, ROR, 0));
    __ str(temp3, MemOperand(dest, 4, PostIndex));
    __ bind(&not_two);
    __ ldrb(temp1, MemOperand(src), ne);
    __ strh(temp1, MemOperand(dest), ne);
    __ Pop(pc, r4);
  }

  CodeDesc desc;
  masm.GetCode(&desc);

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);

  return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
}
#endif

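// Generates a sqrt stub built around the VFP vsqrt instruction; the library
// fallback is used on the simulator or if allocation fails.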
UnaryMathFunction CreateSqrtFunction() {
#if defined(USE_SIMULATOR)
  return &std::sqrt;
#else
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::sqrt;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  __ MovFromFloatParameter(d0);
  __ vsqrt(d0, d0);
  __ MovToFloatResult(d0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
}

#undef __


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  ASSERT(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

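// The ElementsTransitionGenerator functions below migrate a JSObject's
// backing store between elements kinds (map-only change, smi -> double,
// double -> tagged object), updating the receiver's map and emitting the
// write barriers the GC requires.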
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : key
  //  -- r2    : receiver
  //  -- lr    : return address
  //  -- r3    : target map, scratch for subsequent call
  //  -- r4    : scratch (elements)
  // -----------------------------------
  if (mode == TRACK_ALLOCATION_SITE) {
    ASSERT(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(r2, r4, allocation_memento_found);
  }

  // Set transitioned map.
  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
  __ RecordWriteField(r2,
                      HeapObject::kMapOffset,
                      r3,
                      r9,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : key
  //  -- r2    : receiver
  //  -- lr    : return address
  //  -- r3    : target map, scratch for subsequent call
  //  -- r4    : scratch (elements)
  // -----------------------------------
  Label loop, entry, convert_hole, gc_required, only_change_map, done;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(r2, r4, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
  __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ push(lr);
  __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
  // r5: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  // Use lr as a temporary register.
  __ mov(lr, Operand(r5, LSL, 2));
  __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
  __ Allocate(lr, r6, r4, r9, &gc_required, DOUBLE_ALIGNMENT);
  // r6: destination FixedDoubleArray, not tagged as heap object.
  __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
  // r4: source FixedArray.

  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
  __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
  __ str(r9, MemOperand(r6, HeapObject::kMapOffset));

  // Update receiver's map.
  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
  __ RecordWriteField(r2,
                      HeapObject::kMapOffset,
                      r3,
                      r9,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ add(r3, r6, Operand(kHeapObjectTag));
  __ str(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
  __ RecordWriteField(r2,
                      JSObject::kElementsOffset,
                      r3,
                      r9,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(r9, r6, Operand(FixedDoubleArray::kHeaderSize));
  __ add(r6, r9, Operand(r5, LSL, 2));
  __ mov(r4, Operand(kHoleNanLower32));
  __ mov(r5, Operand(kHoleNanUpper32));
  // r3: begin of source FixedArray element fields, not tagged
  // r4: kHoleNanLower32
  // r5: kHoleNanUpper32
  // r6: end of destination FixedDoubleArray, not tagged
  // r9: begin of FixedDoubleArray element fields, not tagged

  __ b(&entry);

  __ bind(&only_change_map);
  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
  __ RecordWriteField(r2,
                      HeapObject::kMapOffset,
                      r3,
                      r9,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ b(&done);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(lr);
  __ b(fail);

  // Convert and copy elements.
  __ bind(&loop);
  __ ldr(lr, MemOperand(r3, 4, PostIndex));
  // lr: current element
  __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);

  // Normal smi, convert to double and store.
  __ vmov(s0, lr);
  __ vcvt_f64_s32(d0, s0);
  __ vstr(d0, r9, 0);
  __ add(r9, r9, Operand(8));
  __ b(&entry);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Restore a "smi-untagged" heap object.
    __ SmiTag(lr);
    __ orr(lr, lr, Operand(1));
    __ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray);
  }
  __ Strd(r4, r5, MemOperand(r9, 8, PostIndex));

  __ bind(&entry);
  __ cmp(r9, r6);
  __ b(lt, &loop);

  __ pop(lr);
  __ bind(&done);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : key
  //  -- r2    : receiver
  //  -- lr    : return address
  //  -- r3    : target map, scratch for subsequent call
  //  -- r4    : scratch (elements)
  // -----------------------------------
  Label entry, loop, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(r2, r4, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
  __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ push(lr);
  __ Push(r3, r2, r1, r0);
  __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
  // r4: source FixedDoubleArray
  // r5: number of elements (smi-tagged)

  // Allocate new FixedArray.
  __ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
  __ add(r0, r0, Operand(r5, LSL, 1));
  __ Allocate(r0, r6, r3, r9, &gc_required, NO_ALLOCATION_FLAGS);
  // r6: destination FixedArray, not tagged as heap object
  // Set destination FixedArray's length and map.
  __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
  __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
  __ str(r9, MemOperand(r6, HeapObject::kMapOffset));

  // Prepare for conversion loop.
  __ add(r4, r4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
  __ add(r3, r6, Operand(FixedArray::kHeaderSize));
  __ add(r6, r6, Operand(kHeapObjectTag));
  __ add(r5, r3, Operand(r5, LSL, 1));
  __ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);
  // Using offset addresses in r4 to fully take advantage of post-indexing.
  // r3: begin of destination FixedArray element fields, not tagged
  // r4: begin of source FixedDoubleArray element fields, not tagged, +4
  // r5: end of destination FixedArray, not tagged
  // r6: destination FixedArray
  // r9: heap number map
  __ b(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(r3, r2, r1, r0);
  __ pop(lr);
  __ b(fail);

  __ bind(&loop);
  __ ldr(r1, MemOperand(r4, 8, PostIndex));
  // r1: current element's upper 32 bits
  // r4: address of next element's upper 32 bits
  __ cmp(r1, Operand(kHoleNanUpper32));
  __ b(eq, &convert_hole);

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(r2, r0, lr, r9, &gc_required);
  // r2: new heap number
  __ ldr(r0, MemOperand(r4, 12, NegOffset));
  __ Strd(r0, r1, FieldMemOperand(r2, HeapNumber::kValueOffset));
  __ mov(r0, r3);
  __ str(r2, MemOperand(r3, 4, PostIndex));
  __ RecordWrite(r6,
                 r0,
                 r2,
                 kLRHasBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ b(&entry);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
  __ str(r0, MemOperand(r3, 4, PostIndex));

  __ bind(&entry);
  __ cmp(r3, r5);
  __ b(lt, &loop);

  __ Pop(r3, r2, r1, r0);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ str(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
  __ RecordWriteField(r2,
                      JSObject::kElementsOffset,
                      r6,
                      r9,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(lr);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
  __ RecordWriteField(r2,
                      HeapObject::kMapOffset,
                      r3,
                      r9,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


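// Loads the character at 'index' of 'string' into 'result'. Slices and flat
// cons strings are first unwrapped to their underlying sequential or
// external string; anything that would need flattening bails out to
// 'call_runtime'.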
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ tst(result, Operand(kIsIndirectStringMask));
  __ b(eq, &check_sequential);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ tst(result, Operand(kSlicedNotConsMask));
  __ b(eq, &cons_string);

  // Handle slices.
  Label indirect_string_loaded;
  __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ add(index, index, Operand::SmiUntag(result));
  __ jmp(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ CompareRoot(result, Heap::kempty_stringRootIndex);
  __ b(ne, call_runtime);
  // Get the first of the two strings and load its instance type.
  __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ tst(result, Operand(kStringRepresentationMask));
  __ b(ne, &external_string);

  // Prepare sequential strings
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ add(string,
         string,
         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ jmp(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ tst(result, Operand(kIsIndirectStringMask));
    __ Assert(eq, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ tst(result, Operand(kShortExternalStringMask));
  __ b(ne, call_runtime);
  __ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label ascii, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ tst(result, Operand(kStringEncodingMask));
  __ b(ne, &ascii);
  // Two-byte string.
  __ ldrh(result, MemOperand(string, index, LSL, 1));
  __ jmp(&done);
  __ bind(&ascii);
  // Ascii string.
  __ ldrb(result, MemOperand(string, index));
  __ bind(&done);
}


static MemOperand ExpConstant(int index, Register base) {
  return MemOperand(base, index * kDoubleSize);
}


void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   DwVfpRegister input,
                                   DwVfpRegister result,
                                   DwVfpRegister double_scratch1,
                                   DwVfpRegister double_scratch2,
                                   Register temp1,
                                   Register temp2,
                                   Register temp3) {
  ASSERT(!input.is(result));
  ASSERT(!input.is(double_scratch1));
  ASSERT(!input.is(double_scratch2));
  ASSERT(!result.is(double_scratch1));
  ASSERT(!result.is(double_scratch2));
  ASSERT(!double_scratch1.is(double_scratch2));
  ASSERT(!temp1.is(temp2));
  ASSERT(!temp1.is(temp3));
  ASSERT(!temp2.is(temp3));
  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);

  Label zero, infinity, done;

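  // A sketch of the computation below (the exact constant layout is defined
  // by ExternalReference::InitializeMathExpData): the input is first clamped
  // against the constants at indices 0 and 1, underflowing to zero or
  // overflowing to infinity. Otherwise x is scaled so that its low bits
  // select an entry of the precomputed log table and its high bits give a
  // power-of-two exponent, while the small remainder is approximated with a
  // short polynomial; the pieces are multiplied back together at the end.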
  __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));

  __ vldr(double_scratch1, ExpConstant(0, temp3));
  __ VFPCompareAndSetFlags(double_scratch1, input);
  __ b(ge, &zero);

  __ vldr(double_scratch2, ExpConstant(1, temp3));
  __ VFPCompareAndSetFlags(input, double_scratch2);
  __ b(ge, &infinity);

  __ vldr(double_scratch1, ExpConstant(3, temp3));
  __ vldr(result, ExpConstant(4, temp3));
  __ vmul(double_scratch1, double_scratch1, input);
  __ vadd(double_scratch1, double_scratch1, result);
  __ VmovLow(temp2, double_scratch1);
  __ vsub(double_scratch1, double_scratch1, result);
  __ vldr(result, ExpConstant(6, temp3));
  __ vldr(double_scratch2, ExpConstant(5, temp3));
  __ vmul(double_scratch1, double_scratch1, double_scratch2);
  __ vsub(double_scratch1, double_scratch1, input);
  __ vsub(result, result, double_scratch1);
  __ vmul(double_scratch2, double_scratch1, double_scratch1);
  __ vmul(result, result, double_scratch2);
  __ vldr(double_scratch2, ExpConstant(7, temp3));
  __ vmul(result, result, double_scratch2);
  __ vsub(result, result, double_scratch1);
  // Move 1 into double_scratch2 as math_exp_constants_array[8] == 1.
  ASSERT(*reinterpret_cast<double*>
         (ExternalReference::math_exp_constants(8).address()) == 1);
  __ vmov(double_scratch2, 1);
  __ vadd(result, result, double_scratch2);
  __ mov(temp1, Operand(temp2, LSR, 11));
  __ Ubfx(temp2, temp2, 0, 11);
  __ add(temp1, temp1, Operand(0x3ff));

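  // At this point temp1 holds the biased exponent (integer part + 0x3ff) and
  // temp2 the 11-bit table index. Below, the 8-byte table entry is loaded
  // and temp1 is OR-ed into bits 20-30 of its upper word, building the
  // power-of-two scale factor in double_scratch1.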
  // Must not call ExpConstant() after overwriting temp3!
  __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
  __ add(temp3, temp3, Operand(temp2, LSL, 3));
  __ ldm(ia, temp3, temp2.bit() | temp3.bit());
  // The first word loaded goes into the lower-numbered register.
  if (temp2.code() < temp3.code()) {
    __ orr(temp1, temp3, Operand(temp1, LSL, 20));
    __ vmov(double_scratch1, temp2, temp1);
  } else {
    __ orr(temp1, temp2, Operand(temp1, LSL, 20));
    __ vmov(double_scratch1, temp3, temp1);
  }
  __ vmul(result, result, double_scratch1);
  __ b(&done);

  __ bind(&zero);
  __ vmov(result, kDoubleRegZero);
  __ b(&done);

  __ bind(&infinity);
  __ vldr(result, ExpConstant(2, temp3));

  __ bind(&done);
}

#undef __

#ifdef DEBUG
// add(r0, pc, Operand(-8))
static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
#endif

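// The "young" code-age sequence is simply the standard function prologue
// generated below: PushFixedFrame, a nop, and the fp adjustment.
// PatchPlatformCodeAge rewrites this sequence in place when code ages.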
CodeAgingHelper::CodeAgingHelper() {
  ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // Since patcher is a large object, allocate it dynamically when needed,
  // to avoid overloading the stack in stress conditions.
  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
  // the process, before the ARM simulator ICache is set up.
  SmartPointer<CodePatcher> patcher(
      new CodePatcher(young_sequence_.start(),
                      young_sequence_.length() / Assembler::kInstrSize,
                      CodePatcher::DONT_FLUSH));
  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
  patcher->masm()->PushFixedFrame(r1);
  patcher->masm()->nop(ip.code());
  patcher->masm()->add(
      fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  ASSERT(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    Address target_address = Memory::Address_at(
        sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize));
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}

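// When code is aged, the young prologue is overwritten with:
//   add r0, pc, #-8       ; r0 -> start of the aged sequence
//   ldr pc, [pc, #-4]     ; tail-call the code-age stub
//   <stub entry address>  ; literal emitted by emit_code_stub_address
// so the stub receives the sequence start address in r0.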
void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    CPU::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
    patcher.masm()->add(r0, pc, Operand(-8));
    patcher.masm()->ldr(pc, MemOperand(pc, -4));
    patcher.masm()->emit_code_stub_address(stub);
  }
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM