// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/arm/codegen-arm.h"

#if V8_TARGET_ARCH_ARM

#include "src/arm/simulator-arm.h"
#include "src/codegen.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {


#define __ masm.

#if defined(V8_HOST_ARCH_ARM)
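// Generates a raw-memory byte-copy stub that exploits unaligned accesses and,
// when available, NEON block loads/stores. If unaligned accesses are not
// supported, or when running on the simulator, the fallback passed in as
// |stub| is returned unchanged.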
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
                                                MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return stub;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  Register dest = r0;
  Register src = r1;
  Register chars = r2;
  Register temp1 = r3;
  Label less_4;

  if (CpuFeatures::IsSupported(NEON)) {
    Label loop, less_256, less_128, less_64, less_32, _16_or_less, _8_or_less;
    Label size_less_than_8;
    __ pld(MemOperand(src, 0));

    __ cmp(chars, Operand(8));
    __ b(lt, &size_less_than_8);
    __ cmp(chars, Operand(32));
    __ b(lt, &less_32);
    if (CpuFeatures::dcache_line_size() == 32) {
      __ pld(MemOperand(src, 32));
    }
    __ cmp(chars, Operand(64));
    __ b(lt, &less_64);
    __ pld(MemOperand(src, 64));
    if (CpuFeatures::dcache_line_size() == 32) {
      __ pld(MemOperand(src, 96));
    }
    __ cmp(chars, Operand(128));
    __ b(lt, &less_128);
    __ pld(MemOperand(src, 128));
    if (CpuFeatures::dcache_line_size() == 32) {
      __ pld(MemOperand(src, 160));
    }
    __ pld(MemOperand(src, 192));
    if (CpuFeatures::dcache_line_size() == 32) {
      __ pld(MemOperand(src, 224));
    }
    __ cmp(chars, Operand(256));
    __ b(lt, &less_256);
    __ sub(chars, chars, Operand(256));

    __ bind(&loop);
    __ pld(MemOperand(src, 256));
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    if (CpuFeatures::dcache_line_size() == 32) {
      __ pld(MemOperand(src, 256));
    }
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(64), SetCC);
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ b(ge, &loop);
    __ add(chars, chars, Operand(256));

    __ bind(&less_256);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(128));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ cmp(chars, Operand(64));
    __ b(lt, &less_64);

    __ bind(&less_128);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(64));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));

    __ bind(&less_64);
    __ cmp(chars, Operand(32));
    __ b(lt, &less_32);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(32));

    __ bind(&less_32);
    __ cmp(chars, Operand(16));
    __ b(le, &_16_or_less);
    __ vld1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(16));

    __ bind(&_16_or_less);
    __ cmp(chars, Operand(8));
    __ b(le, &_8_or_less);
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(8));

    // Do a last copy which may overlap with the previous copy (up to 8 bytes).
    __ bind(&_8_or_less);
    __ rsb(chars, chars, Operand(8));
    __ sub(src, src, Operand(chars));
    __ sub(dest, dest, Operand(chars));
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
    __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest));

    __ Ret();

    __ bind(&size_less_than_8);

    __ bic(temp1, chars, Operand(0x3), SetCC);
    __ b(&less_4, eq);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ str(temp1, MemOperand(dest, 4, PostIndex));
  } else {
    Register temp2 = ip;
    Label loop;

    __ bic(temp2, chars, Operand(0x3), SetCC);
    __ b(&less_4, eq);
    __ add(temp2, dest, temp2);

    __ bind(&loop);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ str(temp1, MemOperand(dest, 4, PostIndex));
    __ cmp(dest, temp2);
    __ b(&loop, ne);
  }

  __ bind(&less_4);
  __ mov(chars, Operand(chars, LSL, 31), SetCC);
  // Shifting chars left by 31 with SetCC puts bit 1 of the remaining count
  // into C and leaves Z clear iff bit 0 is set: bit0 => Z (ne), bit1 => C
  // (cs). The conditional halfword and byte copies below handle the trailing
  // 0-3 bytes.
  __ ldrh(temp1, MemOperand(src, 2, PostIndex), cs);
  __ strh(temp1, MemOperand(dest, 2, PostIndex), cs);
  __ ldrb(temp1, MemOperand(src), ne);
  __ strb(temp1, MemOperand(dest), ne);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}


// Convert 8-bit characters to 16-bit. The number of characters to copy must
// be at least 8.
MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
    Isolate* isolate, MemCopyUint16Uint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return stub;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  Register dest = r0;
  Register src = r1;
  Register chars = r2;
  if (CpuFeatures::IsSupported(NEON)) {
    Register temp = r3;
    Label loop;

    __ bic(temp, chars, Operand(0x7));
    __ sub(chars, chars, Operand(temp));
    __ add(temp, dest, Operand(temp, LSL, 1));

    __ bind(&loop);
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
    __ vmovl(NeonU8, q0, d0);
    __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
    __ cmp(dest, temp);
    __ b(&loop, ne);

    // Do a last copy which will overlap with the previous copy (1 to 8 bytes).
    __ rsb(chars, chars, Operand(8));
    __ sub(src, src, Operand(chars));
    __ sub(dest, dest, Operand(chars, LSL, 1));
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
    __ vmovl(NeonU8, q0, d0);
    __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest));
    __ Ret();
  } else {
    Register temp1 = r3;
    Register temp2 = ip;
    Register temp3 = lr;
    Register temp4 = r4;
    Label loop;
    Label not_two;

    __ Push(lr, r4);
    __ bic(temp2, chars, Operand(0x3));
    __ add(temp2, dest, Operand(temp2, LSL, 1));

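    // Each loop iteration widens four source bytes into four zero-extended
    // halfwords: uxtb16 extracts the even and odd bytes of the loaded word,
    // and pkhbt/pkhtb repack them into two destination words.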
    __ bind(&loop);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ uxtb16(temp3, temp1);
    __ uxtb16(temp4, temp1, 8);
    __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
    __ str(temp1, MemOperand(dest));
    __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
    __ str(temp1, MemOperand(dest, 4));
    __ add(dest, dest, Operand(8));
    __ cmp(dest, temp2);
    __ b(&loop, ne);

    __ mov(chars, Operand(chars, LSL, 31), SetCC);  // bit0 => ne, bit1 => cs
    __ b(&not_two, cc);
    __ ldrh(temp1, MemOperand(src, 2, PostIndex));
    __ uxtb(temp3, temp1, 8);
    __ mov(temp3, Operand(temp3, LSL, 16));
    __ uxtab(temp3, temp3, temp1);
    __ str(temp3, MemOperand(dest, 4, PostIndex));
    __ bind(&not_two);
    __ ldrb(temp1, MemOperand(src), ne);
    __ strh(temp1, MemOperand(dest), ne);
    __ Pop(pc, r4);
  }

  CodeDesc desc;
  masm.GetCode(&desc);

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);

  return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
}
#endif

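// Generates a stub that computes the square root of a double using the VFP
// vsqrt instruction; under the simulator no stub is generated and nullptr is
// returned.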
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
  return nullptr;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  __ MovFromFloatParameter(d0);
  __ vsqrt(d0, d0);
  __ MovToFloatResult(d0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

#undef __


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch_elements = r4;
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     scratch_elements));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(
        receiver, scratch_elements, allocation_memento_found);
  }

  // Set transitioned map.
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      r9,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


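// Transitions a backing store of smi elements (a FixedArray) into a
// FixedDoubleArray: each smi is converted to a double and written to the new
// store, and holes are rewritten as the canonical hole NaN bit pattern.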
void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Register lr contains the return address.
  Label loop, entry, convert_hole, gc_required, only_change_map, done;
  Register elements = r4;
  Register length = r5;
  Register array = r6;
  Register array_end = array;

  // target_map parameter can be clobbered.
  Register scratch1 = target_map;
  Register scratch2 = r9;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, length, array, scratch2));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ push(lr);
  __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // length: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  // Use lr as a temporary register.
  __ mov(lr, Operand(length, LSL, 2));
  __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
  __ Allocate(lr, array, elements, scratch2, &gc_required, DOUBLE_ALIGNMENT);
  __ sub(array, array, Operand(kHeapObjectTag));
  // array: destination FixedDoubleArray, not tagged as heap object.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // r4: source FixedArray.

  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
  __ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ str(scratch2, MemOperand(array, HeapObject::kMapOffset));

  // Update receiver's map.
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch2,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ add(scratch1, array, Operand(kHeapObjectTag));
  __ str(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver,
                      JSObject::kElementsOffset,
                      scratch1,
                      scratch2,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
  __ add(array_end, scratch2, Operand(length, LSL, 2));

  // Repurpose registers no longer in use.
  Register hole_lower = elements;
  Register hole_upper = length;

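  // hole_lower/hole_upper are loaded with kHoleNanLower32/kHoleNanUpper32,
  // the bit pattern of the hole NaN that marks unassigned FixedDoubleArray
  // slots.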
  __ mov(hole_lower, Operand(kHoleNanLower32));
  __ mov(hole_upper, Operand(kHoleNanUpper32));
  // scratch1: begin of source FixedArray element fields, not tagged
  // hole_lower: kHoleNanLower32
  // hole_upper: kHoleNanUpper32
  // array_end: end of destination FixedDoubleArray, not tagged
  // scratch2: begin of FixedDoubleArray element fields, not tagged

  __ b(&entry);

  __ bind(&only_change_map);
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch2,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ b(&done);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(lr);
  __ b(fail);

  // Convert and copy elements.
  __ bind(&loop);
  __ ldr(lr, MemOperand(scratch1, 4, PostIndex));
  // lr: current element
  __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);

  // Normal smi, convert to double and store.
  __ vmov(s0, lr);
  __ vcvt_f64_s32(d0, s0);
  __ vstr(d0, scratch2, 0);
  __ add(scratch2, scratch2, Operand(8));
  __ b(&entry);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Restore a "smi-untagged" heap object.
    __ SmiTag(lr);
    __ orr(lr, lr, Operand(1));
    __ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray);
  }
  __ Strd(hole_lower, hole_upper, MemOperand(scratch2, 8, PostIndex));

  __ bind(&entry);
  __ cmp(scratch2, array_end);
  __ b(lt, &loop);

  __ pop(lr);
  __ bind(&done);
}


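// Converts a FixedDoubleArray backing store into a FixedArray of tagged
// values: each double is boxed in a freshly allocated HeapNumber, and hole
// NaNs become the-hole sentinel.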
void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Register lr contains the return address.
  Label entry, loop, convert_hole, gc_required, only_change_map;
  Register elements = r4;
  Register array = r6;
  Register length = r5;
  Register scratch = r9;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, array, length, scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ push(lr);
  __ Push(target_map, receiver, key, value);
  __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedDoubleArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedArray.
  // Re-use value and target_map registers, as they have been saved on the
  // stack.
  Register array_size = value;
  Register allocate_scratch = target_map;
  __ mov(array_size, Operand(FixedDoubleArray::kHeaderSize));
  __ add(array_size, array_size, Operand(length, LSL, 1));
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
              NO_ALLOCATION_FLAGS);
  // array: destination FixedArray, tagged as heap object
  // Set destination FixedArray's length and map.
  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
  __ str(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
  __ str(scratch, FieldMemOperand(array, HeapObject::kMapOffset));

  __ sub(array, array, Operand(kHeapObjectTag));

  // Prepare for conversion loop.
  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ add(src_elements, elements,
         Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
  __ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
  __ add(dst_end, dst_elements, Operand(length, LSL, 1));

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  Label initialization_loop, initialization_loop_entry;
  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
  __ b(&initialization_loop_entry);
  __ bind(&initialization_loop);
  __ str(scratch, MemOperand(dst_elements, kPointerSize, PostIndex));
  __ bind(&initialization_loop_entry);
  __ cmp(dst_elements, dst_end);
  __ b(lt, &initialization_loop);

  __ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
  __ add(array, array, Operand(kHeapObjectTag));
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  // Use offset addresses in src_elements to take full advantage of
  // post-indexing.
  // dst_elements: begin of destination FixedArray element fields, not tagged
  // src_elements: begin of source FixedDoubleArray element fields,
  //               not tagged, +4
  // dst_end: end of destination FixedArray, not tagged
  // array: destination FixedArray
  // heap_number_map: heap number map
  __ b(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(target_map, receiver, key, value);
  __ pop(lr);
  __ b(fail);

  __ bind(&loop);
  Register upper_bits = key;
  __ ldr(upper_bits, MemOperand(src_elements, 8, PostIndex));
  // upper_bits: current element's upper 32 bits
  // src_elements: address of next element's upper 32 bits
  __ cmp(upper_bits, Operand(kHoleNanUpper32));
  __ b(eq, &convert_hole);

  // Non-hole double, copy value into a heap number.
  Register heap_number = receiver;
  Register scratch2 = value;
  __ AllocateHeapNumber(heap_number, scratch2, lr, heap_number_map,
                        &gc_required);
  // heap_number: new heap number
  __ ldr(scratch2, MemOperand(src_elements, 12, NegOffset));
  __ Strd(scratch2, upper_bits,
          FieldMemOperand(heap_number, HeapNumber::kValueOffset));
  __ mov(scratch2, dst_elements);
  __ str(heap_number, MemOperand(dst_elements, 4, PostIndex));
  __ RecordWrite(array,
                 scratch2,
                 heap_number,
                 kLRHasBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ b(&entry);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
  __ str(scratch2, MemOperand(dst_elements, 4, PostIndex));

  __ bind(&entry);
  __ cmp(dst_elements, dst_end);
  __ b(lt, &loop);

  __ Pop(target_map, receiver, key, value);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver,
                      JSObject::kElementsOffset,
                      array,
                      scratch,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(lr);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


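// Loads the character at |index| of |string| into |result|. Sliced and flat
// cons strings are first unwrapped to their underlying sequential or external
// string; anything the generated code cannot handle (unflattened cons
// strings, short external strings) bails out to |call_runtime|.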
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ tst(result, Operand(kIsIndirectStringMask));
  __ b(eq, &check_sequential);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ tst(result, Operand(kSlicedNotConsMask));
  __ b(eq, &cons_string);

  // Handle slices.
  Label indirect_string_loaded;
  __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ add(index, index, Operand::SmiUntag(result));
  __ jmp(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ CompareRoot(result, Heap::kempty_stringRootIndex);
  __ b(ne, call_runtime);
  // Get the first of the two strings and load its instance type.
  __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ tst(result, Operand(kStringRepresentationMask));
  __ b(ne, &external_string);

  // Prepare sequential strings
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ add(string,
         string,
         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ jmp(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ tst(result, Operand(kIsIndirectStringMask));
    __ Assert(eq, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ tst(result, Operand(kShortExternalStringMask));
  __ b(ne, call_runtime);
  __ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label one_byte, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ tst(result, Operand(kStringEncodingMask));
  __ b(ne, &one_byte);
  // Two-byte string.
  __ ldrh(result, MemOperand(string, index, LSL, 1));
  __ jmp(&done);
  __ bind(&one_byte);
  // One-byte string.
  __ ldrb(result, MemOperand(string, index));
  __ bind(&done);
}

#undef __

#ifdef DEBUG
// add(r0, pc, Operand(-8))
static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
#endif

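// Young code begins with the standard frame prologue emitted below. Aging a
// function (see PatchPlatformCodeAge) overwrites that sequence with
// "add r0, pc, #-8" followed by a pc-relative load of a code age stub
// address, which IsOld() recognizes via kCodeAgePatchFirstInstruction.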
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
  USE(isolate);
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // Since patcher is a large object, allocate it dynamically when needed,
  // to avoid overloading the stack in stress conditions.
  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
  // the process, before the ARM simulator's ICache is set up.
  base::SmartPointer<CodePatcher> patcher(
      new CodePatcher(isolate, young_sequence_.start(),
                      young_sequence_.length() / Assembler::kInstrSize,
                      CodePatcher::DONT_FLUSH));
  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
  patcher->masm()->PushStandardFrame(r1);
  patcher->masm()->nop(ip.code());
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    Address target_address = Memory::Address_at(
        sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize));
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    Assembler::FlushICache(isolate, sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(isolate, sequence,
                        young_length / Assembler::kInstrSize);
    patcher.masm()->add(r0, pc, Operand(-8));
    patcher.masm()->ldr(pc, MemOperand(pc, -4));
    patcher.masm()->emit_code_stub_address(stub);
  }
}


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM